diff --git a/lib/libpcap/gencode.c b/lib/libpcap/gencode.c index 70cb90f80..657692ec8 100644 --- a/lib/libpcap/gencode.c +++ b/lib/libpcap/gencode.c @@ -1,4 +1,4 @@ -/* $OpenBSD: gencode.c,v 1.66 2024/04/08 02:51:14 jsg Exp $ */ +/* $OpenBSD: gencode.c,v 1.67 2024/09/15 07:14:58 jsg Exp $ */ /* * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998 @@ -175,7 +175,6 @@ static void * newchunk(size_t n) { struct membag *m; - int k, size; void *p; m = &membag[cur_membag]; diff --git a/regress/lib/libssl/tlsfuzzer/tlsfuzzer.py b/regress/lib/libssl/tlsfuzzer/tlsfuzzer.py index 49cd3f1d6..404cb2d89 100644 --- a/regress/lib/libssl/tlsfuzzer/tlsfuzzer.py +++ b/regress/lib/libssl/tlsfuzzer/tlsfuzzer.py @@ -1,4 +1,4 @@ -# $OpenBSD: tlsfuzzer.py,v 1.54 2024/09/13 05:58:17 tb Exp $ +# $OpenBSD: tlsfuzzer.py,v 1.55 2024/09/14 07:11:34 tb Exp $ # # Copyright (c) 2020 Theo Buehler # @@ -654,7 +654,7 @@ failing_groups = [ ] class TestRunner: - """ Runs the given tests groups against a server and displays stats. """ + """ Runs the given tests against a server and displays stats. 
""" def __init__( self, timing=False, verbose=False, host="localhost", port=4433, diff --git a/sbin/dump/traverse.c b/sbin/dump/traverse.c index 7863794c8..083225c3f 100644 --- a/sbin/dump/traverse.c +++ b/sbin/dump/traverse.c @@ -1,4 +1,4 @@ -/* $OpenBSD: traverse.c,v 1.42 2024/02/03 18:51:57 beck Exp $ */ +/* $OpenBSD: traverse.c,v 1.43 2024/09/15 07:14:58 jsg Exp $ */ /* $NetBSD: traverse.c,v 1.17 1997/06/05 11:13:27 lukem Exp $ */ /*- @@ -150,7 +150,6 @@ fs_mapinodes(ino_t maxino, int64_t *tapesize, int *anydirskipped) int i, cg, inosused; struct cg *cgp; ino_t ino; - char *cp; if ((cgp = malloc(sblock->fs_cgsize)) == NULL) quit("fs_mapinodes: cannot allocate memory.\n"); diff --git a/sbin/fsck_ffs/pass1.c b/sbin/fsck_ffs/pass1.c index 05e0afe75..d099a3a6d 100644 --- a/sbin/fsck_ffs/pass1.c +++ b/sbin/fsck_ffs/pass1.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pass1.c,v 1.49 2024/02/03 18:51:57 beck Exp $ */ +/* $OpenBSD: pass1.c,v 1.50 2024/09/15 07:14:58 jsg Exp $ */ /* $NetBSD: pass1.c,v 1.16 1996/09/27 22:45:15 christos Exp $ */ /* @@ -71,7 +71,6 @@ pass1(void) u_int c; struct inodesc idesc; daddr_t i, cgd; - u_int8_t *cp; /* * Set file system reserved blocks in used block map. 
diff --git a/sbin/fsck_ffs/pass5.c b/sbin/fsck_ffs/pass5.c index 81bdd9b7f..92258d8c5 100644 --- a/sbin/fsck_ffs/pass5.c +++ b/sbin/fsck_ffs/pass5.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pass5.c,v 1.51 2024/02/03 18:51:57 beck Exp $ */ +/* $OpenBSD: pass5.c,v 1.52 2024/09/15 07:14:58 jsg Exp $ */ /* $NetBSD: pass5.c,v 1.16 1996/09/27 22:45:18 christos Exp $ */ /* @@ -67,7 +67,7 @@ pass5(void) struct fs *fs = &sblock; daddr_t dbase, dmax; daddr_t d; - long i, k, rewritecg = 0; + long i, rewritecg = 0; ino_t j; struct csum *cs; struct csum_total cstotal; diff --git a/sbin/iked/config.c b/sbin/iked/config.c index d42045095..def970e05 100644 --- a/sbin/iked/config.c +++ b/sbin/iked/config.c @@ -1,4 +1,4 @@ -/* $OpenBSD: config.c,v 1.98 2024/07/13 12:22:46 yasuoka Exp $ */ +/* $OpenBSD: config.c,v 1.99 2024/09/15 11:08:50 yasuoka Exp $ */ /* * Copyright (c) 2019 Tobias Heider @@ -178,6 +178,7 @@ config_free_sa(struct iked *env, struct iked_sa *sa) ibuf_free(sa->sa_eap.id_buf); free(sa->sa_eapid); ibuf_free(sa->sa_eapmsk); + ibuf_free(sa->sa_eapclass); free(sa->sa_cp_addr); free(sa->sa_cp_addr6); diff --git a/sbin/iked/iked.h b/sbin/iked/iked.h index 5d95dd929..d3da0b7b3 100644 --- a/sbin/iked/iked.h +++ b/sbin/iked/iked.h @@ -1,4 +1,4 @@ -/* $OpenBSD: iked.h,v 1.231 2024/07/13 12:22:46 yasuoka Exp $ */ +/* $OpenBSD: iked.h,v 1.232 2024/09/15 11:08:50 yasuoka Exp $ */ /* * Copyright (c) 2019 Tobias Heider @@ -491,6 +491,7 @@ struct iked_sa { char *sa_eapid; /* EAP identity */ struct iked_id sa_eap; /* EAP challenge */ struct ibuf *sa_eapmsk; /* EAK session key */ + struct ibuf *sa_eapclass; /* EAP/RADIUS class */ struct iked_proposals sa_proposals; /* SA proposals */ struct iked_childsas sa_childsas; /* IPsec Child SAs */ diff --git a/sbin/iked/ikev2.c b/sbin/iked/ikev2.c index ccbab9de1..b6e8ecee9 100644 --- a/sbin/iked/ikev2.c +++ b/sbin/iked/ikev2.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ikev2.c,v 1.387 2024/07/13 12:22:46 yasuoka Exp $ */ +/* $OpenBSD: ikev2.c,v 1.388 2024/09/15 
11:08:50 yasuoka Exp $ */ /* * Copyright (c) 2019 Tobias Heider @@ -4774,6 +4774,8 @@ ikev2_ikesa_enable(struct iked *env, struct iked_sa *sa, struct iked_sa *nsa) /* sa_eapid needs to be set on both for radius accounting */ if (sa->sa_eapid) nsa->sa_eapid = strdup(sa->sa_eapid); + if (sa->sa_eapclass) + nsa->sa_eapclass = ibuf_dup(sa->sa_eapclass); log_info("%srekeyed as new IKESA %s (enc %s%s%s group %s prf %s)", SPI_SA(sa, NULL), print_spi(nsa->sa_hdr.sh_ispi, 8), diff --git a/sbin/iked/radius.c b/sbin/iked/radius.c index e14c83560..fcaf52198 100644 --- a/sbin/iked/radius.c +++ b/sbin/iked/radius.c @@ -1,4 +1,4 @@ -/* $OpenBSD: radius.c,v 1.12 2024/09/11 00:41:51 yasuoka Exp $ */ +/* $OpenBSD: radius.c,v 1.13 2024/09/15 11:08:50 yasuoka Exp $ */ /* * Copyright (c) 2024 Internet Initiative Japan Inc. @@ -270,6 +270,16 @@ iked_radius_on_event(int fd, short ev, void *ctx) req->rr_sa->sa_eapid = req->rr_user; req->rr_user = NULL; + if (radius_get_raw_attr_ptr(pkt, RADIUS_TYPE_CLASS, &attrval, + &attrlen) == 0) { + ibuf_free(req->rr_sa->sa_eapclass); + if ((req->rr_sa->sa_eapclass = ibuf_new(attrval, + attrlen)) == NULL) { + log_info("%s: ibuf_new() failed: %s", __func__, + strerror(errno)); + } + } + sa_state(env, req->rr_sa, IKEV2_STATE_AUTH_SUCCESS); /* Map RADIUS attributes to cp */ @@ -748,6 +758,10 @@ iked_radius_acct_request(struct iked *env, struct iked_sa *sa, uint8_t stype) switch (stype) { case RADIUS_ACCT_STATUS_TYPE_START: + if (req->rr_sa && req->rr_sa->sa_eapclass != NULL) + radius_put_raw_attr(pkt, RADIUS_TYPE_CLASS, + ibuf_data(req->rr_sa->sa_eapclass), + ibuf_size(req->rr_sa->sa_eapclass)); break; case RADIUS_ACCT_STATUS_TYPE_INTERIM_UPDATE: case RADIUS_ACCT_STATUS_TYPE_STOP: diff --git a/sbin/quotacheck/quotacheck.c b/sbin/quotacheck/quotacheck.c index 6151963fc..3fda8cf55 100644 --- a/sbin/quotacheck/quotacheck.c +++ b/sbin/quotacheck/quotacheck.c @@ -1,4 +1,4 @@ -/* $OpenBSD: quotacheck.c,v 1.42 2024/02/03 18:51:57 beck Exp $ */ +/* $OpenBSD: 
quotacheck.c,v 1.43 2024/09/15 07:14:58 jsg Exp $ */ /* $NetBSD: quotacheck.c,v 1.12 1996/03/30 22:34:25 mark Exp $ */ /* @@ -269,7 +269,6 @@ chkquota(const char *vfstype, const char *fsname, const char *mntpt, int cg, i, mode, errs = 0, status; ino_t ino, inosused; pid_t pid; - char *cp; switch (pid = fork()) { case -1: /* error */ diff --git a/sys/conf/files b/sys/conf/files index 60913cd12..98c386352 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -1,4 +1,4 @@ -# $OpenBSD: files,v 1.738 2024/09/09 03:50:14 jsg Exp $ +# $OpenBSD: files,v 1.740 2024/09/14 11:06:48 jsg Exp $ # $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $ # @(#)files.newconf 7.5 (Berkeley) 5/10/93 @@ -471,7 +471,7 @@ file dev/usb/xhci.c xhci needs-flag # AMD Cryptographic Co-processor device ccp {} -file dev/ic/ccp.c ccp needs-flag +file dev/ic/ccp.c ccp # AMD Platform Security Processor device psp @@ -864,7 +864,7 @@ file net/if_vether.c vether file net/if_rport.c rport file net/if_pair.c pair file net/if_pppx.c pppx needs-count -file net/if_vxlan.c vxlan needs-count +file net/if_vxlan.c vxlan file net/if_wg.c wg file net/wg_noise.c wg file net/wg_cookie.c wg diff --git a/sys/dev/pv/files.pv b/sys/dev/pv/files.pv index bec16181f..b75e4e7e2 100644 --- a/sys/dev/pv/files.pv +++ b/sys/dev/pv/files.pv @@ -1,4 +1,4 @@ -# $OpenBSD: files.pv,v 1.17 2023/04/20 19:28:31 jcs Exp $ +# $OpenBSD: files.pv,v 1.18 2024/09/14 09:21:13 jsg Exp $ # # Config file and device description for paravirtual devices. # Included by ports that need it. 
@@ -11,7 +11,7 @@ file dev/pv/pvbus.c pvbus needs-flag # KVM clock device pvclock attach pvclock at pvbus -file dev/pv/pvclock.c pvclock needs-flag +file dev/pv/pvclock.c pvclock # VMware Tools device vmt diff --git a/usr.bin/calendar/calendars/calendar.history b/usr.bin/calendar/calendars/calendar.history index 26aeefc68..b5c7fb72a 100644 --- a/usr.bin/calendar/calendars/calendar.history +++ b/usr.bin/calendar/calendars/calendar.history @@ -1,7 +1,7 @@ /* * History * - * $OpenBSD: calendar.history,v 1.82 2020/04/19 21:08:39 jmc Exp $ + * $OpenBSD: calendar.history,v 1.83 2024/09/14 20:15:24 schwarze Exp $ */ #ifndef _calendar_history_ @@ -433,7 +433,6 @@ 11/07 Lewis and Clark Expedition in sight of the Pacific Ocean, 1805 11/08 Invasion of Sweden by Danish forces results in the Stockholm Bloodbath, 1520 -11/09 Giant panda discovered (?!), China, 1927 11/09 Jack the Ripper kills fifth and final victim, Jane Kelly, 1888 11/10 Henry Stanley asks David Livingstone, "Dr. Livingstone, I presume?", 1871 diff --git a/usr.bin/rpcinfo/rpcinfo.c b/usr.bin/rpcinfo/rpcinfo.c index c407170cc..124b894cb 100644 --- a/usr.bin/rpcinfo/rpcinfo.c +++ b/usr.bin/rpcinfo/rpcinfo.c @@ -1,4 +1,4 @@ -/* $OpenBSD: rpcinfo.c,v 1.19 2024/08/16 16:00:30 florian Exp $ */ +/* $OpenBSD: rpcinfo.c,v 1.20 2024/09/15 07:14:58 jsg Exp $ */ /* * Copyright (c) 2010, Oracle America, Inc. @@ -489,7 +489,6 @@ void pmapdump(int argc, char **argv) { struct sockaddr_in server_addr; - struct hostent *hp; struct pmaplist *head = NULL; int socket = RPC_ANYSOCK; struct timeval minutetimeout; diff --git a/usr.bin/ssh/auth.c b/usr.bin/ssh/auth.c index 3fec9d71b..399d75dd4 100644 --- a/usr.bin/ssh/auth.c +++ b/usr.bin/ssh/auth.c @@ -1,4 +1,4 @@ -/* $OpenBSD: auth.c,v 1.161 2024/05/17 00:30:23 djm Exp $ */ +/* $OpenBSD: auth.c,v 1.162 2024/09/15 01:18:26 djm Exp $ */ /* * Copyright (c) 2000 Markus Friedl. All rights reserved. 
* @@ -421,6 +421,7 @@ getpwnamallow(struct ssh *ssh, const char *user) ci = server_get_connection_info(ssh, 1, options.use_dns); ci->user = user; + ci->user_invalid = getpwnam(user) == NULL; parse_server_match_config(&options, &includes, ci); log_change_level(options.log_level); log_verbose_reset(); diff --git a/usr.bin/ssh/kexsntrup761x25519.c b/usr.bin/ssh/kexsntrup761x25519.c index e3f7831d3..c2055d76c 100644 --- a/usr.bin/ssh/kexsntrup761x25519.c +++ b/usr.bin/ssh/kexsntrup761x25519.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kexsntrup761x25519.c,v 1.2 2021/12/05 12:28:27 jsg Exp $ */ +/* $OpenBSD: kexsntrup761x25519.c,v 1.3 2024/09/15 02:20:51 djm Exp $ */ /* * Copyright (c) 2019 Markus Friedl. All rights reserved. * @@ -35,6 +35,10 @@ #include "digest.h" #include "ssherr.h" +volatile crypto_int16 crypto_int16_optblocker = 0; +volatile crypto_int32 crypto_int32_optblocker = 0; +volatile crypto_int64 crypto_int64_optblocker = 0; + int kex_kem_sntrup761x25519_keypair(struct kex *kex) { diff --git a/usr.bin/ssh/monitor.c b/usr.bin/ssh/monitor.c index eea560023..6d34e7240 100644 --- a/usr.bin/ssh/monitor.c +++ b/usr.bin/ssh/monitor.c @@ -1,4 +1,4 @@ -/* $OpenBSD: monitor.c,v 1.242 2024/09/09 02:39:57 djm Exp $ */ +/* $OpenBSD: monitor.c,v 1.244 2024/09/15 01:09:40 djm Exp $ */ /* * Copyright 2002 Niels Provos * Copyright 2002 Markus Friedl @@ -81,6 +81,7 @@ #include "match.h" #include "ssherr.h" #include "sk-api.h" +#include "srclimit.h" #ifdef GSSAPI static Gssctxt *gsscontext = NULL; @@ -723,6 +724,15 @@ mm_answer_pwnamallow(struct ssh *ssh, int sock, struct sshbuf *m) ssh_packet_set_log_preamble(ssh, "%suser %s", authctxt->valid ? "authenticating" : "invalid ", authctxt->user); + if (options.refuse_connection) { + logit("administratively prohibited connection for " + "%s%s from %.128s port %d", + authctxt->valid ? 
"" : "invalid user ", + authctxt->user, ssh_remote_ipaddr(ssh), + ssh_remote_port(ssh)); + cleanup_exit(EXIT_CONFIG_REFUSED); + } + /* Send active options to unpriv */ mm_encode_server_options(m); @@ -1243,7 +1253,7 @@ mm_answer_keyverify(struct ssh *ssh, int sock, struct sshbuf *m) } auth2_record_key(authctxt, ret == 0, key); - if (key_blobtype == MM_USERKEY) + if (key_blobtype == MM_USERKEY && ret == 0) auth_activate_options(ssh, key_opts); monitor_reset_key_state(); diff --git a/usr.bin/ssh/readconf.c b/usr.bin/ssh/readconf.c index d8bd498ba..33bb91adb 100644 --- a/usr.bin/ssh/readconf.c +++ b/usr.bin/ssh/readconf.c @@ -1,4 +1,4 @@ -/* $OpenBSD: readconf.c,v 1.389 2024/09/03 05:29:55 djm Exp $ */ +/* $OpenBSD: readconf.c,v 1.390 2024/09/15 00:57:36 djm Exp $ */ /* * Author: Tatu Ylonen * Copyright (c) 1995 Tatu Ylonen , Espoo, Finland @@ -683,11 +683,11 @@ expand_match_exec_or_include_path(const char *path, Options *options, * Parse and execute a Match directive. */ static int -match_cfg_line(Options *options, char **condition, struct passwd *pw, - const char *host_arg, const char *original_host, int final_pass, - int *want_final_pass, const char *filename, int linenum) +match_cfg_line(Options *options, const char *full_line, int *acp, char ***avp, + struct passwd *pw, const char *host_arg, const char *original_host, + int final_pass, int *want_final_pass, const char *filename, int linenum) { - char *arg, *oattrib, *attrib, *cmd, *cp = *condition, *host, *criteria; + char *arg, *oattrib, *attrib, *cmd, *host, *criteria; const char *ruser; int r, this_result, result = 1, attributes = 0, negate; @@ -707,11 +707,11 @@ match_cfg_line(Options *options, char **condition, struct passwd *pw, } debug2("checking match for '%s' host %s originally %s", - cp, host, original_host); - while ((oattrib = attrib = strdelim(&cp)) && *attrib != '\0') { + full_line, host, original_host); + while ((oattrib = attrib = argv_next(acp, avp)) != NULL) { /* Terminate on comment */ if 
(*attrib == '#') { - cp = NULL; /* mark all arguments consumed */ + argv_consume(acp); break; } arg = criteria = NULL; @@ -720,7 +720,8 @@ match_cfg_line(Options *options, char **condition, struct passwd *pw, attrib++; /* Criterion "all" has no argument and must appear alone */ if (strcasecmp(attrib, "all") == 0) { - if (attributes > 1 || ((arg = strdelim(&cp)) != NULL && + if (attributes > 1 || + ((arg = argv_next(acp, avp)) != NULL && *arg != '\0' && *arg != '#')) { error("%.200s line %d: '%s' cannot be combined " "with other Match attributes", @@ -729,7 +730,7 @@ match_cfg_line(Options *options, char **condition, struct passwd *pw, goto out; } if (arg != NULL && *arg == '#') - cp = NULL; /* mark all arguments consumed */ + argv_consume(acp); /* consume remaining args */ if (result) result = negate ? 0 : 1; goto out; @@ -754,7 +755,7 @@ match_cfg_line(Options *options, char **condition, struct passwd *pw, continue; } /* All other criteria require an argument */ - if ((arg = strdelim(&cp)) == NULL || + if ((arg = argv_next(acp, avp)) == NULL || *arg == '\0' || *arg == '#') { error("Missing Match criteria for %s", attrib); result = -1; @@ -841,7 +842,6 @@ match_cfg_line(Options *options, char **condition, struct passwd *pw, out: if (result != -1) debug2("match %sfound", result ? "" : "not "); - *condition = cp; free(host); return result; } @@ -1784,8 +1784,8 @@ parse_pubkey_algos: "option"); goto out; } - value = match_cfg_line(options, &str, pw, host, original_host, - flags & SSHCONF_FINAL, want_final_pass, + value = match_cfg_line(options, str, &ac, &av, pw, host, + original_host, flags & SSHCONF_FINAL, want_final_pass, filename, linenum); if (value < 0) { error("%.200s line %d: Bad Match condition", filename, @@ -1793,13 +1793,6 @@ parse_pubkey_algos: goto out; } *activep = (flags & SSHCONF_NEVERMATCH) ? 0 : value; - /* - * If match_cfg_line() didn't consume all its arguments then - * arrange for the extra arguments check below to fail. 
- */ - - if (str == NULL || *str == '\0') - argv_consume(&ac); break; case oEscapeChar: diff --git a/usr.bin/ssh/servconf.c b/usr.bin/ssh/servconf.c index 9b4bcfeea..347300e12 100644 --- a/usr.bin/ssh/servconf.c +++ b/usr.bin/ssh/servconf.c @@ -1,4 +1,4 @@ -/* $OpenBSD: servconf.c,v 1.413 2024/08/17 08:23:04 djm Exp $ */ +/* $OpenBSD: servconf.c,v 1.418 2024/09/15 03:09:44 djm Exp $ */ /* * Copyright (c) 1995 Tatu Ylonen , Espoo, Finland * All rights reserved @@ -155,6 +155,7 @@ initialize_server_options(ServerOptions *options) options->per_source_penalty.penalty_authfail = -1; options->per_source_penalty.penalty_noauth = -1; options->per_source_penalty.penalty_grace = -1; + options->per_source_penalty.penalty_refuseconnection = -1; options->per_source_penalty.penalty_max = -1; options->per_source_penalty.penalty_min = -1; options->max_authtries = -1; @@ -190,6 +191,7 @@ initialize_server_options(ServerOptions *options) options->num_channel_timeouts = 0; options->unused_connection_timeout = -1; options->sshd_session_path = NULL; + options->refuse_connection = -1; } /* Returns 1 if a string option is unset or set to "none" or 0 otherwise. 
*/ @@ -407,6 +409,8 @@ fill_default_server_options(ServerOptions *options) options->per_source_penalty.penalty_authfail = 5; if (options->per_source_penalty.penalty_noauth == -1) options->per_source_penalty.penalty_noauth = 1; + if (options->per_source_penalty.penalty_refuseconnection == -1) + options->per_source_penalty.penalty_refuseconnection = 10; if (options->per_source_penalty.penalty_min == -1) options->per_source_penalty.penalty_min = 15; if (options->per_source_penalty.penalty_max == -1) @@ -457,6 +461,8 @@ fill_default_server_options(ServerOptions *options) options->unused_connection_timeout = 0; if (options->sshd_session_path == NULL) options->sshd_session_path = xstrdup(_PATH_SSHD_SESSION); + if (options->refuse_connection == -1) + options->refuse_connection = 0; assemble_algorithms(options); @@ -536,7 +542,7 @@ typedef enum { sAllowStreamLocalForwarding, sFingerprintHash, sDisableForwarding, sExposeAuthInfo, sRDomain, sPubkeyAuthOptions, sSecurityKeyProvider, sRequiredRSASize, sChannelTimeout, sUnusedConnectionTimeout, - sSshdSessionPath, + sSshdSessionPath, sRefuseConnection, sDeprecated, sIgnore, sUnsupported } ServerOpCodes; @@ -686,6 +692,7 @@ static struct { { "channeltimeout", sChannelTimeout, SSHCFG_ALL }, { "unusedconnectiontimeout", sUnusedConnectionTimeout, SSHCFG_ALL }, { "sshdsessionpath", sSshdSessionPath, SSHCFG_GLOBAL }, + { "refuseconnection", sRefuseConnection, SSHCFG_ALL }, { NULL, sBadOption, 0 } }; @@ -962,43 +969,57 @@ match_test_missing_fatal(const char *criteria, const char *attrib) * not match. 
*/ static int -match_cfg_line(char **condition, int line, struct connection_info *ci) +match_cfg_line(const char *full_line, int *acp, char ***avp, + int line, struct connection_info *ci) { int result = 1, attributes = 0, port; - char *arg, *attrib, *cp = *condition; + char *arg, *attrib; if (ci == NULL) - debug3("checking syntax for 'Match %s'", cp); - else - debug3("checking match for '%s' user %s host %s addr %s " - "laddr %s lport %d", cp, ci->user ? ci->user : "(null)", + debug3("checking syntax for 'Match %s'", full_line); + else { + debug3("checking match for '%s' user %s%s host %s addr %s " + "laddr %s lport %d", full_line, + ci->user ? ci->user : "(null)", + ci->user_invalid ? " (invalid)" : "", ci->host ? ci->host : "(null)", ci->address ? ci->address : "(null)", ci->laddress ? ci->laddress : "(null)", ci->lport); + } - while ((attrib = strdelim(&cp)) && *attrib != '\0') { + while ((attrib = argv_next(acp, avp)) != NULL) { /* Terminate on comment */ if (*attrib == '#') { - cp = NULL; /* mark all arguments consumed */ + argv_consume(acp); /* mark all arguments consumed */ break; } arg = NULL; attributes++; /* Criterion "all" has no argument and must appear alone */ if (strcasecmp(attrib, "all") == 0) { - if (attributes > 1 || ((arg = strdelim(&cp)) != NULL && + if (attributes > 1 || + ((arg = argv_next(acp, avp)) != NULL && *arg != '\0' && *arg != '#')) { error("'all' cannot be combined with other " "Match attributes"); return -1; } if (arg != NULL && *arg == '#') - cp = NULL; /* mark all arguments consumed */ - *condition = cp; + argv_consume(acp); /* consume remaining args */ return 1; } + /* Criterion "invalid-user" also has no argument */ + if (strcasecmp(attrib, "invalid-user") == 0) { + if (ci == NULL) + continue; + if (ci->user_invalid == 0) + result = 0; + else + debug("matched invalid-user at line %d", line); + continue; + } /* All other criteria require an argument */ - if ((arg = strdelim(&cp)) == NULL || + if ((arg = argv_next(acp, avp)) == 
NULL || *arg == '\0' || *arg == '#') { error("Missing Match criteria for %s", attrib); return -1; @@ -1129,7 +1150,6 @@ match_cfg_line(char **condition, int line, struct connection_info *ci) } if (ci != NULL) debug3("match %sfound", result ? "" : "not "); - *condition = cp; return result; } @@ -1972,6 +1992,9 @@ process_server_config_line_depth(ServerOptions *options, char *line, } else if (strncmp(arg, "grace-exceeded:", 15) == 0) { p = arg + 15; intptr = &options->per_source_penalty.penalty_grace; + } else if (strncmp(arg, "refuseconnection:", 17) == 0) { + p = arg + 17; + intptr = &options->per_source_penalty.penalty_refuseconnection; } else if (strncmp(arg, "max:", 4) == 0) { p = arg + 4; intptr = &options->per_source_penalty.penalty_max; @@ -2250,7 +2273,7 @@ process_server_config_line_depth(ServerOptions *options, char *line, if (cmdline) fatal("Match directive not supported as a command-line " "option"); - value = match_cfg_line(&str, linenum, + value = match_cfg_line(str, &ac, &av, linenum, (*inc_flags & SSHCFG_NEVERMATCH ? NULL : connectinfo)); if (value < 0) fatal("%s line %d: Bad Match condition", filename, @@ -2261,12 +2284,6 @@ process_server_config_line_depth(ServerOptions *options, char *line, * match block. */ *inc_flags &= ~SSHCFG_MATCH_ONLY; - /* - * If match_cfg_line() didn't consume all its arguments then - * arrange for the extra arguments check below to fail. 
- */ - if (str == NULL || *str == '\0') - argv_consume(&ac); break; case sPermitListen: @@ -2579,6 +2596,11 @@ process_server_config_line_depth(ServerOptions *options, char *line, charptr = &options->sshd_session_path; goto parse_filename; + case sRefuseConnection: + intptr = &options->refuse_connection; + multistate_ptr = multistate_flag; + goto parse_multistate; + case sDeprecated: case sIgnore: case sUnsupported: @@ -2693,6 +2715,8 @@ int parse_server_match_testspec(struct connection_info *ci, char *spec) " specification %s\n", p+6, p); return -1; } + } else if (strcmp(p, "invalid-user") == 0) { + ci->user_invalid = 1; } else { fprintf(stderr, "Invalid test mode specification %s\n", p); @@ -2794,6 +2818,7 @@ copy_set_server_options(ServerOptions *dst, ServerOptions *src, int preauth) M_CP_INTOPT(log_level); M_CP_INTOPT(required_rsa_size); M_CP_INTOPT(unused_connection_timeout); + M_CP_INTOPT(refuse_connection); /* * The bind_mask is a mode_t that may be unsigned, so we can't use @@ -3116,6 +3141,7 @@ dump_config(ServerOptions *o) dump_cfg_fmtint(sStreamLocalBindUnlink, o->fwd_opts.streamlocal_bind_unlink); dump_cfg_fmtint(sFingerprintHash, o->fingerprint_hash); dump_cfg_fmtint(sExposeAuthInfo, o->expose_userauth_info); + dump_cfg_fmtint(sRefuseConnection, o->refuse_connection); /* string arguments */ dump_cfg_string(sPidFile, o->pid_file); @@ -3236,12 +3262,14 @@ dump_config(ServerOptions *o) if (o->per_source_penalty.enabled) { printf("persourcepenalties crash:%d authfail:%d noauth:%d " - "grace-exceeded:%d max:%d min:%d max-sources4:%d " - "max-sources6:%d overflow:%s overflow6:%s\n", + "grace-exceeded:%d refuseconnection:%d max:%d min:%d " + "max-sources4:%d max-sources6:%d " + "overflow:%s overflow6:%s\n", o->per_source_penalty.penalty_crash, o->per_source_penalty.penalty_authfail, o->per_source_penalty.penalty_noauth, o->per_source_penalty.penalty_grace, + o->per_source_penalty.penalty_refuseconnection, o->per_source_penalty.penalty_max, 
o->per_source_penalty.penalty_min, o->per_source_penalty.max_sources4, diff --git a/usr.bin/ssh/servconf.h b/usr.bin/ssh/servconf.h index 442dacdcd..33ada42e0 100644 --- a/usr.bin/ssh/servconf.h +++ b/usr.bin/ssh/servconf.h @@ -1,4 +1,4 @@ -/* $OpenBSD: servconf.h,v 1.165 2024/06/12 22:36:00 djm Exp $ */ +/* $OpenBSD: servconf.h,v 1.168 2024/09/15 01:18:26 djm Exp $ */ /* * Author: Tatu Ylonen @@ -77,6 +77,7 @@ struct per_source_penalty { int penalty_grace; int penalty_authfail; int penalty_noauth; + int penalty_refuseconnection; int penalty_max; int penalty_min; }; @@ -245,11 +246,14 @@ typedef struct { int unused_connection_timeout; char *sshd_session_path; + + int refuse_connection; } ServerOptions; /* Information about the incoming connection as used by Match */ struct connection_info { const char *user; + int user_invalid; const char *host; /* possibly resolved hostname */ const char *address; /* remote address */ const char *laddress; /* local address */ diff --git a/usr.bin/ssh/sntrup761.c b/usr.bin/ssh/sntrup761.c index 3ec225a0a..81ae5dc86 100644 --- a/usr.bin/ssh/sntrup761.c +++ b/usr.bin/ssh/sntrup761.c @@ -1,4 +1,4 @@ -/* $OpenBSD: sntrup761.c,v 1.6 2023/01/11 02:13:52 djm Exp $ */ +/* $OpenBSD: sntrup761.c,v 1.7 2024/09/15 02:20:51 djm Exp $ */ /* * Public Domain, Authors: @@ -11,6 +11,8 @@ #include #include "crypto_api.h" +#define crypto_declassify(x, y) do {} while (0) + #define int8 crypto_int8 #define uint8 crypto_uint8 #define int16 crypto_int16 @@ -19,21 +21,1596 @@ #define uint32 crypto_uint32 #define int64 crypto_int64 #define uint64 crypto_uint64 +extern volatile crypto_int16 crypto_int16_optblocker; +extern volatile crypto_int32 crypto_int32_optblocker; +extern volatile crypto_int64 crypto_int64_optblocker; -/* from supercop-20201130/crypto_sort/int32/portable4/int32_minmax.inc */ -#define int32_MINMAX(a,b) \ -do { \ - int64_t ab = (int64_t)b ^ (int64_t)a; \ - int64_t c = (int64_t)b - (int64_t)a; \ - c ^= ab & (c ^ b); \ - c >>= 31; \ - c &= 
ab; \ - a ^= c; \ - b ^= c; \ -} while(0) +/* from supercop-20240808/cryptoint/crypto_int16.h */ +/* auto-generated: cd cryptoint; ./autogen */ +/* cryptoint 20240806 */ -/* from supercop-20201130/crypto_sort/int32/portable4/sort.c */ +#ifndef crypto_int16_h +#define crypto_int16_h +#define crypto_int16 int16_t +#define crypto_int16_unsigned uint16_t + + + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_load(const unsigned char *crypto_int16_s) { + crypto_int16 crypto_int16_z = 0; + crypto_int16_z |= ((crypto_int16) (*crypto_int16_s++)) << 0; + crypto_int16_z |= ((crypto_int16) (*crypto_int16_s++)) << 8; + return crypto_int16_z; +} + +__attribute__((unused)) +static inline +void crypto_int16_store(unsigned char *crypto_int16_s,crypto_int16 crypto_int16_x) { + *crypto_int16_s++ = crypto_int16_x >> 0; + *crypto_int16_s++ = crypto_int16_x >> 8; +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_negative_mask(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarw $15,%0" : "+r"(crypto_int16_x) : : "cc"); + return crypto_int16_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_y; + __asm__ ("sbfx %w0,%w1,15,1" : "=r"(crypto_int16_y) : "r"(crypto_int16_x) : ); + return crypto_int16_y; +#else + crypto_int16_x >>= 16-6; + crypto_int16_x ^= crypto_int16_optblocker; + crypto_int16_x >>= 5; + return crypto_int16_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int16_unsigned crypto_int16_unsigned_topbit_01(crypto_int16_unsigned crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("shrw $15,%0" : "+r"(crypto_int16_x) : : "cc"); + return crypto_int16_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_y; + __asm__ ("ubfx %w0,%w1,15,1" : "=r"(crypto_int16_y) : "r"(crypto_int16_x) : ); + return crypto_int16_y; +#else + crypto_int16_x >>= 16-6; + crypto_int16_x ^= crypto_int16_optblocker; + crypto_int16_x >>= 
5; + return crypto_int16_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_negative_01(crypto_int16 crypto_int16_x) { + return crypto_int16_unsigned_topbit_01(crypto_int16_x); +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_topbit_mask(crypto_int16 crypto_int16_x) { + return crypto_int16_negative_mask(crypto_int16_x); +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_topbit_01(crypto_int16 crypto_int16_x) { + return crypto_int16_unsigned_topbit_01(crypto_int16_x); +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_bottombit_mask(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("andw $1,%0" : "+r"(crypto_int16_x) : : "cc"); + return -crypto_int16_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_y; + __asm__ ("sbfx %w0,%w1,0,1" : "=r"(crypto_int16_y) : "r"(crypto_int16_x) : ); + return crypto_int16_y; +#else + crypto_int16_x &= 1 ^ crypto_int16_optblocker; + return -crypto_int16_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_bottombit_01(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("andw $1,%0" : "+r"(crypto_int16_x) : : "cc"); + return crypto_int16_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_y; + __asm__ ("ubfx %w0,%w1,0,1" : "=r"(crypto_int16_y) : "r"(crypto_int16_x) : ); + return crypto_int16_y; +#else + crypto_int16_x &= 1 ^ crypto_int16_optblocker; + return crypto_int16_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_bitinrangepublicpos_mask(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarw %%cl,%0" : "+r"(crypto_int16_x) : "c"(crypto_int16_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("sxth %w0,%w0\n asr %w0,%w0,%w1" : "+&r"(crypto_int16_x) : 
"r"(crypto_int16_s) : ); +#else + crypto_int16_x >>= crypto_int16_s ^ crypto_int16_optblocker; +#endif + return crypto_int16_bottombit_mask(crypto_int16_x); +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_bitinrangepublicpos_01(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarw %%cl,%0" : "+r"(crypto_int16_x) : "c"(crypto_int16_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("sxth %w0,%w0\n asr %w0,%w0,%w1" : "+&r"(crypto_int16_x) : "r"(crypto_int16_s) : ); +#else + crypto_int16_x >>= crypto_int16_s ^ crypto_int16_optblocker; +#endif + return crypto_int16_bottombit_01(crypto_int16_x); +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_shlmod(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_s) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16_s &= 15; + __asm__ ("shlw %%cl,%0" : "+r"(crypto_int16_x) : "c"(crypto_int16_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("and %w0,%w0,15\n and %w1,%w1,65535\n lsl %w1,%w1,%w0" : "+&r"(crypto_int16_s), "+r"(crypto_int16_x) : : ); +#else + int crypto_int16_k, crypto_int16_l; + for (crypto_int16_l = 0,crypto_int16_k = 1;crypto_int16_k < 16;++crypto_int16_l,crypto_int16_k *= 2) + crypto_int16_x ^= (crypto_int16_x ^ (crypto_int16_x << crypto_int16_k)) & crypto_int16_bitinrangepublicpos_mask(crypto_int16_s,crypto_int16_l); +#endif + return crypto_int16_x; +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_shrmod(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_s) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16_s &= 15; + __asm__ ("sarw %%cl,%0" : "+r"(crypto_int16_x) : "c"(crypto_int16_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("and %w0,%w0,15\n sxth %w1,%w1\n asr %w1,%w1,%w0" : "+&r"(crypto_int16_s), "+r"(crypto_int16_x) : : ); +#else + int crypto_int16_k, crypto_int16_l; + for 
(crypto_int16_l = 0,crypto_int16_k = 1;crypto_int16_k < 16;++crypto_int16_l,crypto_int16_k *= 2) + crypto_int16_x ^= (crypto_int16_x ^ (crypto_int16_x >> crypto_int16_k)) & crypto_int16_bitinrangepublicpos_mask(crypto_int16_s,crypto_int16_l); +#endif + return crypto_int16_x; +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_bitmod_mask(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_s) { + crypto_int16_x = crypto_int16_shrmod(crypto_int16_x,crypto_int16_s); + return crypto_int16_bottombit_mask(crypto_int16_x); +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_bitmod_01(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_s) { + crypto_int16_x = crypto_int16_shrmod(crypto_int16_x,crypto_int16_s); + return crypto_int16_bottombit_01(crypto_int16_x); +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_nonzero_mask(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $-1,%1\n testw %2,%2\n cmovnew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("tst %w1,65535\n csetm %w0,ne" : "=r"(crypto_int16_z) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#else + crypto_int16_x |= -crypto_int16_x; + return crypto_int16_negative_mask(crypto_int16_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_nonzero_01(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $1,%1\n testw %2,%2\n cmovnew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("tst %w1,65535\n cset %w0,ne" : 
"=r"(crypto_int16_z) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#else + crypto_int16_x |= -crypto_int16_x; + return crypto_int16_unsigned_topbit_01(crypto_int16_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_positive_mask(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $-1,%1\n testw %2,%2\n cmovgw %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("sxth %w0,%w1\n cmp %w0,0\n csetm %w0,gt" : "=r"(crypto_int16_z) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#else + crypto_int16 crypto_int16_z = -crypto_int16_x; + crypto_int16_z ^= crypto_int16_x & crypto_int16_z; + return crypto_int16_negative_mask(crypto_int16_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_positive_01(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $1,%1\n testw %2,%2\n cmovgw %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("sxth %w0,%w1\n cmp %w0,0\n cset %w0,gt" : "=r"(crypto_int16_z) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#else + crypto_int16 crypto_int16_z = -crypto_int16_x; + crypto_int16_z ^= crypto_int16_x & crypto_int16_z; + return crypto_int16_unsigned_topbit_01(crypto_int16_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_zero_mask(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $-1,%1\n testw %2,%2\n cmovew %1,%0" : 
"=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("tst %w1,65535\n csetm %w0,eq" : "=r"(crypto_int16_z) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#else + return ~crypto_int16_nonzero_mask(crypto_int16_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_zero_01(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $1,%1\n testw %2,%2\n cmovew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("tst %w1,65535\n cset %w0,eq" : "=r"(crypto_int16_z) : "r"(crypto_int16_x) : "cc"); + return crypto_int16_z; +#else + return 1-crypto_int16_nonzero_01(crypto_int16_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_unequal_mask(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $-1,%1\n cmpw %3,%2\n cmovnew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("and %w0,%w1,65535\n cmp %w0,%w2,uxth\n csetm %w0,ne" : "=&r"(crypto_int16_z) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#else + return crypto_int16_nonzero_mask(crypto_int16_x ^ crypto_int16_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_unequal_01(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; 
+ __asm__ ("xorw %0,%0\n movw $1,%1\n cmpw %3,%2\n cmovnew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("and %w0,%w1,65535\n cmp %w0,%w2,uxth\n cset %w0,ne" : "=&r"(crypto_int16_z) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#else + return crypto_int16_nonzero_01(crypto_int16_x ^ crypto_int16_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_equal_mask(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $-1,%1\n cmpw %3,%2\n cmovew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("and %w0,%w1,65535\n cmp %w0,%w2,uxth\n csetm %w0,eq" : "=&r"(crypto_int16_z) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#else + return ~crypto_int16_unequal_mask(crypto_int16_x,crypto_int16_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_equal_01(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $1,%1\n cmpw %3,%2\n cmovew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("and %w0,%w1,65535\n cmp %w0,%w2,uxth\n cset %w0,eq" : "=&r"(crypto_int16_z) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#else + return 
1-crypto_int16_unequal_01(crypto_int16_x,crypto_int16_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_min(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("cmpw %1,%0\n cmovgw %1,%0" : "+r"(crypto_int16_x) : "r"(crypto_int16_y) : "cc"); + return crypto_int16_x; +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("sxth %w0,%w0\n cmp %w0,%w1,sxth\n csel %w0,%w0,%w1,lt" : "+&r"(crypto_int16_x) : "r"(crypto_int16_y) : "cc"); + return crypto_int16_x; +#else + crypto_int16 crypto_int16_r = crypto_int16_y ^ crypto_int16_x; + crypto_int16 crypto_int16_z = crypto_int16_y - crypto_int16_x; + crypto_int16_z ^= crypto_int16_r & (crypto_int16_z ^ crypto_int16_y); + crypto_int16_z = crypto_int16_negative_mask(crypto_int16_z); + crypto_int16_z &= crypto_int16_r; + return crypto_int16_x ^ crypto_int16_z; +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_max(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("cmpw %1,%0\n cmovlw %1,%0" : "+r"(crypto_int16_x) : "r"(crypto_int16_y) : "cc"); + return crypto_int16_x; +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("sxth %w0,%w0\n cmp %w0,%w1,sxth\n csel %w0,%w1,%w0,lt" : "+&r"(crypto_int16_x) : "r"(crypto_int16_y) : "cc"); + return crypto_int16_x; +#else + crypto_int16 crypto_int16_r = crypto_int16_y ^ crypto_int16_x; + crypto_int16 crypto_int16_z = crypto_int16_y - crypto_int16_x; + crypto_int16_z ^= crypto_int16_r & (crypto_int16_z ^ crypto_int16_y); + crypto_int16_z = crypto_int16_negative_mask(crypto_int16_z); + crypto_int16_z &= crypto_int16_r; + return crypto_int16_y ^ crypto_int16_z; +#endif +} + +__attribute__((unused)) +static inline +void crypto_int16_minmax(crypto_int16 *crypto_int16_p,crypto_int16 *crypto_int16_q) { + crypto_int16 crypto_int16_x = *crypto_int16_p; + crypto_int16 crypto_int16_y = 
*crypto_int16_q; +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_z; + __asm__ ("cmpw %2,%1\n movw %1,%0\n cmovgw %2,%1\n cmovgw %0,%2" : "=&r"(crypto_int16_z), "+&r"(crypto_int16_x), "+r"(crypto_int16_y) : : "cc"); + *crypto_int16_p = crypto_int16_x; + *crypto_int16_q = crypto_int16_y; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_r, crypto_int16_s; + __asm__ ("sxth %w0,%w0\n cmp %w0,%w3,sxth\n csel %w1,%w0,%w3,lt\n csel %w2,%w3,%w0,lt" : "+&r"(crypto_int16_x), "=&r"(crypto_int16_r), "=r"(crypto_int16_s) : "r"(crypto_int16_y) : "cc"); + *crypto_int16_p = crypto_int16_r; + *crypto_int16_q = crypto_int16_s; +#else + crypto_int16 crypto_int16_r = crypto_int16_y ^ crypto_int16_x; + crypto_int16 crypto_int16_z = crypto_int16_y - crypto_int16_x; + crypto_int16_z ^= crypto_int16_r & (crypto_int16_z ^ crypto_int16_y); + crypto_int16_z = crypto_int16_negative_mask(crypto_int16_z); + crypto_int16_z &= crypto_int16_r; + crypto_int16_x ^= crypto_int16_z; + crypto_int16_y ^= crypto_int16_z; + *crypto_int16_p = crypto_int16_x; + *crypto_int16_q = crypto_int16_y; +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_smaller_mask(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $-1,%1\n cmpw %3,%2\n cmovlw %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("sxth %w0,%w1\n cmp %w0,%w2,sxth\n csetm %w0,lt" : "=&r"(crypto_int16_z) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#else + crypto_int16 crypto_int16_r = crypto_int16_x ^ crypto_int16_y; + crypto_int16 crypto_int16_z = crypto_int16_x - crypto_int16_y; + crypto_int16_z ^= crypto_int16_r & (crypto_int16_z ^ 
crypto_int16_x); + return crypto_int16_negative_mask(crypto_int16_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_smaller_01(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $1,%1\n cmpw %3,%2\n cmovlw %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("sxth %w0,%w1\n cmp %w0,%w2,sxth\n cset %w0,lt" : "=&r"(crypto_int16_z) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#else + crypto_int16 crypto_int16_r = crypto_int16_x ^ crypto_int16_y; + crypto_int16 crypto_int16_z = crypto_int16_x - crypto_int16_y; + crypto_int16_z ^= crypto_int16_r & (crypto_int16_z ^ crypto_int16_x); + return crypto_int16_unsigned_topbit_01(crypto_int16_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_leq_mask(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + __asm__ ("xorw %0,%0\n movw $-1,%1\n cmpw %3,%2\n cmovlew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("sxth %w0,%w1\n cmp %w0,%w2,sxth\n csetm %w0,le" : "=&r"(crypto_int16_z) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#else + return ~crypto_int16_smaller_mask(crypto_int16_y,crypto_int16_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int16 crypto_int16_leq_01(crypto_int16 crypto_int16_x,crypto_int16 crypto_int16_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 crypto_int16_q,crypto_int16_z; + 
__asm__ ("xorw %0,%0\n movw $1,%1\n cmpw %3,%2\n cmovlew %1,%0" : "=&r"(crypto_int16_z), "=&r"(crypto_int16_q) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int16 crypto_int16_z; + __asm__ ("sxth %w0,%w1\n cmp %w0,%w2,sxth\n cset %w0,le" : "=&r"(crypto_int16_z) : "r"(crypto_int16_x), "r"(crypto_int16_y) : "cc"); + return crypto_int16_z; +#else + return 1-crypto_int16_smaller_01(crypto_int16_y,crypto_int16_x); +#endif +} + +__attribute__((unused)) +static inline +int crypto_int16_ones_num(crypto_int16 crypto_int16_x) { + crypto_int16_unsigned crypto_int16_y = crypto_int16_x; + const crypto_int16 C0 = 0x5555; + const crypto_int16 C1 = 0x3333; + const crypto_int16 C2 = 0x0f0f; + crypto_int16_y -= ((crypto_int16_y >> 1) & C0); + crypto_int16_y = (crypto_int16_y & C1) + ((crypto_int16_y >> 2) & C1); + crypto_int16_y = (crypto_int16_y + (crypto_int16_y >> 4)) & C2; + crypto_int16_y = (crypto_int16_y + (crypto_int16_y >> 8)) & 0xff; + return crypto_int16_y; +} + +__attribute__((unused)) +static inline +int crypto_int16_bottomzeros_num(crypto_int16 crypto_int16_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int16 fallback = 16; + __asm__ ("bsfw %0,%0\n cmovew %1,%0" : "+&r"(crypto_int16_x) : "r"(fallback) : "cc"); + return crypto_int16_x; +#elif defined(__GNUC__) && defined(__aarch64__) + int64_t crypto_int16_y; + __asm__ ("orr %w0,%w1,-65536\n rbit %w0,%w0\n clz %w0,%w0" : "=r"(crypto_int16_y) : "r"(crypto_int16_x) : ); + return crypto_int16_y; +#else + crypto_int16 crypto_int16_y = crypto_int16_x ^ (crypto_int16_x-1); + crypto_int16_y = ((crypto_int16) crypto_int16_y) >> 1; + crypto_int16_y &= ~(crypto_int16_x & (((crypto_int16) 1) << (16-1))); + return crypto_int16_ones_num(crypto_int16_y); +#endif +} + +#endif + +/* from supercop-20240808/cryptoint/crypto_int32.h */ +/* auto-generated: cd cryptoint; ./autogen */ +/* cryptoint 20240806 */ + +#ifndef crypto_int32_h 
+#define crypto_int32_h + +#define crypto_int32 int32_t +#define crypto_int32_unsigned uint32_t + + + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_load(const unsigned char *crypto_int32_s) { + crypto_int32 crypto_int32_z = 0; + crypto_int32_z |= ((crypto_int32) (*crypto_int32_s++)) << 0; + crypto_int32_z |= ((crypto_int32) (*crypto_int32_s++)) << 8; + crypto_int32_z |= ((crypto_int32) (*crypto_int32_s++)) << 16; + crypto_int32_z |= ((crypto_int32) (*crypto_int32_s++)) << 24; + return crypto_int32_z; +} + +__attribute__((unused)) +static inline +void crypto_int32_store(unsigned char *crypto_int32_s,crypto_int32 crypto_int32_x) { + *crypto_int32_s++ = crypto_int32_x >> 0; + *crypto_int32_s++ = crypto_int32_x >> 8; + *crypto_int32_s++ = crypto_int32_x >> 16; + *crypto_int32_s++ = crypto_int32_x >> 24; +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_negative_mask(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarl $31,%0" : "+r"(crypto_int32_x) : : "cc"); + return crypto_int32_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_y; + __asm__ ("asr %w0,%w1,31" : "=r"(crypto_int32_y) : "r"(crypto_int32_x) : ); + return crypto_int32_y; +#else + crypto_int32_x >>= 32-6; + crypto_int32_x ^= crypto_int32_optblocker; + crypto_int32_x >>= 5; + return crypto_int32_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int32_unsigned crypto_int32_unsigned_topbit_01(crypto_int32_unsigned crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("shrl $31,%0" : "+r"(crypto_int32_x) : : "cc"); + return crypto_int32_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_y; + __asm__ ("lsr %w0,%w1,31" : "=r"(crypto_int32_y) : "r"(crypto_int32_x) : ); + return crypto_int32_y; +#else + crypto_int32_x >>= 32-6; + crypto_int32_x ^= crypto_int32_optblocker; + crypto_int32_x >>= 5; + return crypto_int32_x; +#endif +} + 
+__attribute__((unused)) +static inline +crypto_int32 crypto_int32_negative_01(crypto_int32 crypto_int32_x) { + return crypto_int32_unsigned_topbit_01(crypto_int32_x); +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_topbit_mask(crypto_int32 crypto_int32_x) { + return crypto_int32_negative_mask(crypto_int32_x); +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_topbit_01(crypto_int32 crypto_int32_x) { + return crypto_int32_unsigned_topbit_01(crypto_int32_x); +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_bottombit_mask(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("andl $1,%0" : "+r"(crypto_int32_x) : : "cc"); + return -crypto_int32_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_y; + __asm__ ("sbfx %w0,%w1,0,1" : "=r"(crypto_int32_y) : "r"(crypto_int32_x) : ); + return crypto_int32_y; +#else + crypto_int32_x &= 1 ^ crypto_int32_optblocker; + return -crypto_int32_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_bottombit_01(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("andl $1,%0" : "+r"(crypto_int32_x) : : "cc"); + return crypto_int32_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_y; + __asm__ ("ubfx %w0,%w1,0,1" : "=r"(crypto_int32_y) : "r"(crypto_int32_x) : ); + return crypto_int32_y; +#else + crypto_int32_x &= 1 ^ crypto_int32_optblocker; + return crypto_int32_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_bitinrangepublicpos_mask(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarl %%cl,%0" : "+r"(crypto_int32_x) : "c"(crypto_int32_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("asr %w0,%w0,%w1" : "+r"(crypto_int32_x) : "r"(crypto_int32_s) : ); +#else + crypto_int32_x >>= 
crypto_int32_s ^ crypto_int32_optblocker; +#endif + return crypto_int32_bottombit_mask(crypto_int32_x); +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_bitinrangepublicpos_01(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarl %%cl,%0" : "+r"(crypto_int32_x) : "c"(crypto_int32_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("asr %w0,%w0,%w1" : "+r"(crypto_int32_x) : "r"(crypto_int32_s) : ); +#else + crypto_int32_x >>= crypto_int32_s ^ crypto_int32_optblocker; +#endif + return crypto_int32_bottombit_01(crypto_int32_x); +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_shlmod(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("shll %%cl,%0" : "+r"(crypto_int32_x) : "c"(crypto_int32_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("lsl %w0,%w0,%w1" : "+r"(crypto_int32_x) : "r"(crypto_int32_s) : ); +#else + int crypto_int32_k, crypto_int32_l; + for (crypto_int32_l = 0,crypto_int32_k = 1;crypto_int32_k < 32;++crypto_int32_l,crypto_int32_k *= 2) + crypto_int32_x ^= (crypto_int32_x ^ (crypto_int32_x << crypto_int32_k)) & crypto_int32_bitinrangepublicpos_mask(crypto_int32_s,crypto_int32_l); +#endif + return crypto_int32_x; +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_shrmod(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarl %%cl,%0" : "+r"(crypto_int32_x) : "c"(crypto_int32_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("asr %w0,%w0,%w1" : "+r"(crypto_int32_x) : "r"(crypto_int32_s) : ); +#else + int crypto_int32_k, crypto_int32_l; + for (crypto_int32_l = 0,crypto_int32_k = 1;crypto_int32_k < 32;++crypto_int32_l,crypto_int32_k *= 2) + crypto_int32_x ^= (crypto_int32_x ^ (crypto_int32_x >> crypto_int32_k)) & 
crypto_int32_bitinrangepublicpos_mask(crypto_int32_s,crypto_int32_l); +#endif + return crypto_int32_x; +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_bitmod_mask(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_s) { + crypto_int32_x = crypto_int32_shrmod(crypto_int32_x,crypto_int32_s); + return crypto_int32_bottombit_mask(crypto_int32_x); +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_bitmod_01(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_s) { + crypto_int32_x = crypto_int32_shrmod(crypto_int32_x,crypto_int32_s); + return crypto_int32_bottombit_01(crypto_int32_x); +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_nonzero_mask(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $-1,%1\n testl %2,%2\n cmovnel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,0\n csetm %w0,ne" : "=r"(crypto_int32_z) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#else + crypto_int32_x |= -crypto_int32_x; + return crypto_int32_negative_mask(crypto_int32_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_nonzero_01(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $1,%1\n testl %2,%2\n cmovnel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,0\n cset %w0,ne" : "=r"(crypto_int32_z) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#else + crypto_int32_x |= -crypto_int32_x; + return crypto_int32_unsigned_topbit_01(crypto_int32_x); +#endif 
+} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_positive_mask(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $-1,%1\n testl %2,%2\n cmovgl %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,0\n csetm %w0,gt" : "=r"(crypto_int32_z) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#else + crypto_int32 crypto_int32_z = -crypto_int32_x; + crypto_int32_z ^= crypto_int32_x & crypto_int32_z; + return crypto_int32_negative_mask(crypto_int32_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_positive_01(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $1,%1\n testl %2,%2\n cmovgl %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,0\n cset %w0,gt" : "=r"(crypto_int32_z) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#else + crypto_int32 crypto_int32_z = -crypto_int32_x; + crypto_int32_z ^= crypto_int32_x & crypto_int32_z; + return crypto_int32_unsigned_topbit_01(crypto_int32_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_zero_mask(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $-1,%1\n testl %2,%2\n cmovel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,0\n csetm %w0,eq" : 
"=r"(crypto_int32_z) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#else + return ~crypto_int32_nonzero_mask(crypto_int32_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_zero_01(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $1,%1\n testl %2,%2\n cmovel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,0\n cset %w0,eq" : "=r"(crypto_int32_z) : "r"(crypto_int32_x) : "cc"); + return crypto_int32_z; +#else + return 1-crypto_int32_nonzero_01(crypto_int32_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_unequal_mask(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $-1,%1\n cmpl %3,%2\n cmovnel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,%w2\n csetm %w0,ne" : "=r"(crypto_int32_z) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#else + return crypto_int32_nonzero_mask(crypto_int32_x ^ crypto_int32_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_unequal_01(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $1,%1\n cmpl %3,%2\n cmovnel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 
crypto_int32_z; + __asm__ ("cmp %w1,%w2\n cset %w0,ne" : "=r"(crypto_int32_z) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#else + return crypto_int32_nonzero_01(crypto_int32_x ^ crypto_int32_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_equal_mask(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $-1,%1\n cmpl %3,%2\n cmovel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,%w2\n csetm %w0,eq" : "=r"(crypto_int32_z) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#else + return ~crypto_int32_unequal_mask(crypto_int32_x,crypto_int32_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_equal_01(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $1,%1\n cmpl %3,%2\n cmovel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,%w2\n cset %w0,eq" : "=r"(crypto_int32_z) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#else + return 1-crypto_int32_unequal_01(crypto_int32_x,crypto_int32_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_min(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("cmpl %1,%0\n cmovgl %1,%0" : "+r"(crypto_int32_x) : "r"(crypto_int32_y) : "cc"); + return crypto_int32_x; +#elif 
defined(__GNUC__) && defined(__aarch64__) + __asm__ ("cmp %w0,%w1\n csel %w0,%w0,%w1,lt" : "+r"(crypto_int32_x) : "r"(crypto_int32_y) : "cc"); + return crypto_int32_x; +#else + crypto_int32 crypto_int32_r = crypto_int32_y ^ crypto_int32_x; + crypto_int32 crypto_int32_z = crypto_int32_y - crypto_int32_x; + crypto_int32_z ^= crypto_int32_r & (crypto_int32_z ^ crypto_int32_y); + crypto_int32_z = crypto_int32_negative_mask(crypto_int32_z); + crypto_int32_z &= crypto_int32_r; + return crypto_int32_x ^ crypto_int32_z; +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_max(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("cmpl %1,%0\n cmovll %1,%0" : "+r"(crypto_int32_x) : "r"(crypto_int32_y) : "cc"); + return crypto_int32_x; +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("cmp %w0,%w1\n csel %w0,%w1,%w0,lt" : "+r"(crypto_int32_x) : "r"(crypto_int32_y) : "cc"); + return crypto_int32_x; +#else + crypto_int32 crypto_int32_r = crypto_int32_y ^ crypto_int32_x; + crypto_int32 crypto_int32_z = crypto_int32_y - crypto_int32_x; + crypto_int32_z ^= crypto_int32_r & (crypto_int32_z ^ crypto_int32_y); + crypto_int32_z = crypto_int32_negative_mask(crypto_int32_z); + crypto_int32_z &= crypto_int32_r; + return crypto_int32_y ^ crypto_int32_z; +#endif +} + +__attribute__((unused)) +static inline +void crypto_int32_minmax(crypto_int32 *crypto_int32_p,crypto_int32 *crypto_int32_q) { + crypto_int32 crypto_int32_x = *crypto_int32_p; + crypto_int32 crypto_int32_y = *crypto_int32_q; +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmpl %2,%1\n movl %1,%0\n cmovgl %2,%1\n cmovgl %0,%2" : "=&r"(crypto_int32_z), "+&r"(crypto_int32_x), "+r"(crypto_int32_y) : : "cc"); + *crypto_int32_p = crypto_int32_x; + *crypto_int32_q = crypto_int32_y; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_r, crypto_int32_s; + __asm__ ("cmp 
%w2,%w3\n csel %w0,%w2,%w3,lt\n csel %w1,%w3,%w2,lt" : "=&r"(crypto_int32_r), "=r"(crypto_int32_s) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + *crypto_int32_p = crypto_int32_r; + *crypto_int32_q = crypto_int32_s; +#else + crypto_int32 crypto_int32_r = crypto_int32_y ^ crypto_int32_x; + crypto_int32 crypto_int32_z = crypto_int32_y - crypto_int32_x; + crypto_int32_z ^= crypto_int32_r & (crypto_int32_z ^ crypto_int32_y); + crypto_int32_z = crypto_int32_negative_mask(crypto_int32_z); + crypto_int32_z &= crypto_int32_r; + crypto_int32_x ^= crypto_int32_z; + crypto_int32_y ^= crypto_int32_z; + *crypto_int32_p = crypto_int32_x; + *crypto_int32_q = crypto_int32_y; +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_smaller_mask(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $-1,%1\n cmpl %3,%2\n cmovll %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,%w2\n csetm %w0,lt" : "=r"(crypto_int32_z) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#else + crypto_int32 crypto_int32_r = crypto_int32_x ^ crypto_int32_y; + crypto_int32 crypto_int32_z = crypto_int32_x - crypto_int32_y; + crypto_int32_z ^= crypto_int32_r & (crypto_int32_z ^ crypto_int32_x); + return crypto_int32_negative_mask(crypto_int32_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_smaller_01(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $1,%1\n cmpl %3,%2\n cmovll %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + 
return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,%w2\n cset %w0,lt" : "=r"(crypto_int32_z) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#else + crypto_int32 crypto_int32_r = crypto_int32_x ^ crypto_int32_y; + crypto_int32 crypto_int32_z = crypto_int32_x - crypto_int32_y; + crypto_int32_z ^= crypto_int32_r & (crypto_int32_z ^ crypto_int32_x); + return crypto_int32_unsigned_topbit_01(crypto_int32_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_leq_mask(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $-1,%1\n cmpl %3,%2\n cmovlel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,%w2\n csetm %w0,le" : "=r"(crypto_int32_z) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#else + return ~crypto_int32_smaller_mask(crypto_int32_y,crypto_int32_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int32 crypto_int32_leq_01(crypto_int32 crypto_int32_x,crypto_int32 crypto_int32_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 crypto_int32_q,crypto_int32_z; + __asm__ ("xorl %0,%0\n movl $1,%1\n cmpl %3,%2\n cmovlel %1,%0" : "=&r"(crypto_int32_z), "=&r"(crypto_int32_q) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int32 crypto_int32_z; + __asm__ ("cmp %w1,%w2\n cset %w0,le" : "=r"(crypto_int32_z) : "r"(crypto_int32_x), "r"(crypto_int32_y) : "cc"); + return crypto_int32_z; +#else + return 1-crypto_int32_smaller_01(crypto_int32_y,crypto_int32_x); +#endif +} + +__attribute__((unused)) 
+static inline +int crypto_int32_ones_num(crypto_int32 crypto_int32_x) { + crypto_int32_unsigned crypto_int32_y = crypto_int32_x; + const crypto_int32 C0 = 0x55555555; + const crypto_int32 C1 = 0x33333333; + const crypto_int32 C2 = 0x0f0f0f0f; + crypto_int32_y -= ((crypto_int32_y >> 1) & C0); + crypto_int32_y = (crypto_int32_y & C1) + ((crypto_int32_y >> 2) & C1); + crypto_int32_y = (crypto_int32_y + (crypto_int32_y >> 4)) & C2; + crypto_int32_y += crypto_int32_y >> 8; + crypto_int32_y = (crypto_int32_y + (crypto_int32_y >> 16)) & 0xff; + return crypto_int32_y; +} + +__attribute__((unused)) +static inline +int crypto_int32_bottomzeros_num(crypto_int32 crypto_int32_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int32 fallback = 32; + __asm__ ("bsfl %0,%0\n cmovel %1,%0" : "+&r"(crypto_int32_x) : "r"(fallback) : "cc"); + return crypto_int32_x; +#elif defined(__GNUC__) && defined(__aarch64__) + int64_t crypto_int32_y; + __asm__ ("rbit %w0,%w1\n clz %w0,%w0" : "=r"(crypto_int32_y) : "r"(crypto_int32_x) : ); + return crypto_int32_y; +#else + crypto_int32 crypto_int32_y = crypto_int32_x ^ (crypto_int32_x-1); + crypto_int32_y = ((crypto_int32) crypto_int32_y) >> 1; + crypto_int32_y &= ~(crypto_int32_x & (((crypto_int32) 1) << (32-1))); + return crypto_int32_ones_num(crypto_int32_y); +#endif +} + +#endif + +/* from supercop-20240808/cryptoint/crypto_int64.h */ +/* auto-generated: cd cryptoint; ./autogen */ +/* cryptoint 20240806 */ + +#ifndef crypto_int64_h +#define crypto_int64_h + +#define crypto_int64 int64_t +#define crypto_int64_unsigned uint64_t + + + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_load(const unsigned char *crypto_int64_s) { + crypto_int64 crypto_int64_z = 0; + crypto_int64_z |= ((crypto_int64) (*crypto_int64_s++)) << 0; + crypto_int64_z |= ((crypto_int64) (*crypto_int64_s++)) << 8; + crypto_int64_z |= ((crypto_int64) (*crypto_int64_s++)) << 16; + crypto_int64_z |= ((crypto_int64) (*crypto_int64_s++)) << 24; + 
crypto_int64_z |= ((crypto_int64) (*crypto_int64_s++)) << 32; + crypto_int64_z |= ((crypto_int64) (*crypto_int64_s++)) << 40; + crypto_int64_z |= ((crypto_int64) (*crypto_int64_s++)) << 48; + crypto_int64_z |= ((crypto_int64) (*crypto_int64_s++)) << 56; + return crypto_int64_z; +} + +__attribute__((unused)) +static inline +void crypto_int64_store(unsigned char *crypto_int64_s,crypto_int64 crypto_int64_x) { + *crypto_int64_s++ = crypto_int64_x >> 0; + *crypto_int64_s++ = crypto_int64_x >> 8; + *crypto_int64_s++ = crypto_int64_x >> 16; + *crypto_int64_s++ = crypto_int64_x >> 24; + *crypto_int64_s++ = crypto_int64_x >> 32; + *crypto_int64_s++ = crypto_int64_x >> 40; + *crypto_int64_s++ = crypto_int64_x >> 48; + *crypto_int64_s++ = crypto_int64_x >> 56; +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_negative_mask(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarq $63,%0" : "+r"(crypto_int64_x) : : "cc"); + return crypto_int64_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_y; + __asm__ ("asr %0,%1,63" : "=r"(crypto_int64_y) : "r"(crypto_int64_x) : ); + return crypto_int64_y; +#else + crypto_int64_x >>= 64-6; + crypto_int64_x ^= crypto_int64_optblocker; + crypto_int64_x >>= 5; + return crypto_int64_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int64_unsigned crypto_int64_unsigned_topbit_01(crypto_int64_unsigned crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("shrq $63,%0" : "+r"(crypto_int64_x) : : "cc"); + return crypto_int64_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_y; + __asm__ ("lsr %0,%1,63" : "=r"(crypto_int64_y) : "r"(crypto_int64_x) : ); + return crypto_int64_y; +#else + crypto_int64_x >>= 64-6; + crypto_int64_x ^= crypto_int64_optblocker; + crypto_int64_x >>= 5; + return crypto_int64_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 
crypto_int64_negative_01(crypto_int64 crypto_int64_x) { + return crypto_int64_unsigned_topbit_01(crypto_int64_x); +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_topbit_mask(crypto_int64 crypto_int64_x) { + return crypto_int64_negative_mask(crypto_int64_x); +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_topbit_01(crypto_int64 crypto_int64_x) { + return crypto_int64_unsigned_topbit_01(crypto_int64_x); +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_bottombit_mask(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("andq $1,%0" : "+r"(crypto_int64_x) : : "cc"); + return -crypto_int64_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_y; + __asm__ ("sbfx %0,%1,0,1" : "=r"(crypto_int64_y) : "r"(crypto_int64_x) : ); + return crypto_int64_y; +#else + crypto_int64_x &= 1 ^ crypto_int64_optblocker; + return -crypto_int64_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_bottombit_01(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("andq $1,%0" : "+r"(crypto_int64_x) : : "cc"); + return crypto_int64_x; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_y; + __asm__ ("ubfx %0,%1,0,1" : "=r"(crypto_int64_y) : "r"(crypto_int64_x) : ); + return crypto_int64_y; +#else + crypto_int64_x &= 1 ^ crypto_int64_optblocker; + return crypto_int64_x; +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_bitinrangepublicpos_mask(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarq %%cl,%0" : "+r"(crypto_int64_x) : "c"(crypto_int64_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("asr %0,%0,%1" : "+r"(crypto_int64_x) : "r"(crypto_int64_s) : ); +#else + crypto_int64_x >>= crypto_int64_s ^ crypto_int64_optblocker; +#endif + return 
crypto_int64_bottombit_mask(crypto_int64_x); +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_bitinrangepublicpos_01(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarq %%cl,%0" : "+r"(crypto_int64_x) : "c"(crypto_int64_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("asr %0,%0,%1" : "+r"(crypto_int64_x) : "r"(crypto_int64_s) : ); +#else + crypto_int64_x >>= crypto_int64_s ^ crypto_int64_optblocker; +#endif + return crypto_int64_bottombit_01(crypto_int64_x); +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_shlmod(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("shlq %%cl,%0" : "+r"(crypto_int64_x) : "c"(crypto_int64_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("lsl %0,%0,%1" : "+r"(crypto_int64_x) : "r"(crypto_int64_s) : ); +#else + int crypto_int64_k, crypto_int64_l; + for (crypto_int64_l = 0,crypto_int64_k = 1;crypto_int64_k < 64;++crypto_int64_l,crypto_int64_k *= 2) + crypto_int64_x ^= (crypto_int64_x ^ (crypto_int64_x << crypto_int64_k)) & crypto_int64_bitinrangepublicpos_mask(crypto_int64_s,crypto_int64_l); +#endif + return crypto_int64_x; +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_shrmod(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_s) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("sarq %%cl,%0" : "+r"(crypto_int64_x) : "c"(crypto_int64_s) : "cc"); +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("asr %0,%0,%1" : "+r"(crypto_int64_x) : "r"(crypto_int64_s) : ); +#else + int crypto_int64_k, crypto_int64_l; + for (crypto_int64_l = 0,crypto_int64_k = 1;crypto_int64_k < 64;++crypto_int64_l,crypto_int64_k *= 2) + crypto_int64_x ^= (crypto_int64_x ^ (crypto_int64_x >> crypto_int64_k)) & crypto_int64_bitinrangepublicpos_mask(crypto_int64_s,crypto_int64_l); +#endif + return 
crypto_int64_x; +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_bitmod_mask(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_s) { + crypto_int64_x = crypto_int64_shrmod(crypto_int64_x,crypto_int64_s); + return crypto_int64_bottombit_mask(crypto_int64_x); +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_bitmod_01(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_s) { + crypto_int64_x = crypto_int64_shrmod(crypto_int64_x,crypto_int64_s); + return crypto_int64_bottombit_01(crypto_int64_x); +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_nonzero_mask(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $-1,%1\n testq %2,%2\n cmovneq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,0\n csetm %0,ne" : "=r"(crypto_int64_z) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#else + crypto_int64_x |= -crypto_int64_x; + return crypto_int64_negative_mask(crypto_int64_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_nonzero_01(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $1,%1\n testq %2,%2\n cmovneq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,0\n cset %0,ne" : "=r"(crypto_int64_z) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#else + crypto_int64_x |= -crypto_int64_x; + return crypto_int64_unsigned_topbit_01(crypto_int64_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 
crypto_int64_positive_mask(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $-1,%1\n testq %2,%2\n cmovgq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,0\n csetm %0,gt" : "=r"(crypto_int64_z) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#else + crypto_int64 crypto_int64_z = -crypto_int64_x; + crypto_int64_z ^= crypto_int64_x & crypto_int64_z; + return crypto_int64_negative_mask(crypto_int64_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_positive_01(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $1,%1\n testq %2,%2\n cmovgq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,0\n cset %0,gt" : "=r"(crypto_int64_z) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#else + crypto_int64 crypto_int64_z = -crypto_int64_x; + crypto_int64_z ^= crypto_int64_x & crypto_int64_z; + return crypto_int64_unsigned_topbit_01(crypto_int64_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_zero_mask(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $-1,%1\n testq %2,%2\n cmoveq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,0\n csetm %0,eq" : "=r"(crypto_int64_z) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; 
+#else + return ~crypto_int64_nonzero_mask(crypto_int64_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_zero_01(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $1,%1\n testq %2,%2\n cmoveq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,0\n cset %0,eq" : "=r"(crypto_int64_z) : "r"(crypto_int64_x) : "cc"); + return crypto_int64_z; +#else + return 1-crypto_int64_nonzero_01(crypto_int64_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_unequal_mask(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $-1,%1\n cmpq %3,%2\n cmovneq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,%2\n csetm %0,ne" : "=r"(crypto_int64_z) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#else + return crypto_int64_nonzero_mask(crypto_int64_x ^ crypto_int64_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_unequal_01(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $1,%1\n cmpq %3,%2\n cmovneq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,%2\n cset %0,ne" : "=r"(crypto_int64_z) : 
"r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#else + return crypto_int64_nonzero_01(crypto_int64_x ^ crypto_int64_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_equal_mask(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $-1,%1\n cmpq %3,%2\n cmoveq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,%2\n csetm %0,eq" : "=r"(crypto_int64_z) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#else + return ~crypto_int64_unequal_mask(crypto_int64_x,crypto_int64_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_equal_01(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $1,%1\n cmpq %3,%2\n cmoveq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,%2\n cset %0,eq" : "=r"(crypto_int64_z) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#else + return 1-crypto_int64_unequal_01(crypto_int64_x,crypto_int64_y); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_min(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("cmpq %1,%0\n cmovgq %1,%0" : "+r"(crypto_int64_x) : "r"(crypto_int64_y) : "cc"); + return crypto_int64_x; +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("cmp %0,%1\n csel %0,%0,%1,lt" : 
"+r"(crypto_int64_x) : "r"(crypto_int64_y) : "cc"); + return crypto_int64_x; +#else + crypto_int64 crypto_int64_r = crypto_int64_y ^ crypto_int64_x; + crypto_int64 crypto_int64_z = crypto_int64_y - crypto_int64_x; + crypto_int64_z ^= crypto_int64_r & (crypto_int64_z ^ crypto_int64_y); + crypto_int64_z = crypto_int64_negative_mask(crypto_int64_z); + crypto_int64_z &= crypto_int64_r; + return crypto_int64_x ^ crypto_int64_z; +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_max(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + __asm__ ("cmpq %1,%0\n cmovlq %1,%0" : "+r"(crypto_int64_x) : "r"(crypto_int64_y) : "cc"); + return crypto_int64_x; +#elif defined(__GNUC__) && defined(__aarch64__) + __asm__ ("cmp %0,%1\n csel %0,%1,%0,lt" : "+r"(crypto_int64_x) : "r"(crypto_int64_y) : "cc"); + return crypto_int64_x; +#else + crypto_int64 crypto_int64_r = crypto_int64_y ^ crypto_int64_x; + crypto_int64 crypto_int64_z = crypto_int64_y - crypto_int64_x; + crypto_int64_z ^= crypto_int64_r & (crypto_int64_z ^ crypto_int64_y); + crypto_int64_z = crypto_int64_negative_mask(crypto_int64_z); + crypto_int64_z &= crypto_int64_r; + return crypto_int64_y ^ crypto_int64_z; +#endif +} + +__attribute__((unused)) +static inline +void crypto_int64_minmax(crypto_int64 *crypto_int64_p,crypto_int64 *crypto_int64_q) { + crypto_int64 crypto_int64_x = *crypto_int64_p; + crypto_int64 crypto_int64_y = *crypto_int64_q; +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmpq %2,%1\n movq %1,%0\n cmovgq %2,%1\n cmovgq %0,%2" : "=&r"(crypto_int64_z), "+&r"(crypto_int64_x), "+r"(crypto_int64_y) : : "cc"); + *crypto_int64_p = crypto_int64_x; + *crypto_int64_q = crypto_int64_y; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_r, crypto_int64_s; + __asm__ ("cmp %2,%3\n csel %0,%2,%3,lt\n csel %1,%3,%2,lt" : "=&r"(crypto_int64_r), "=r"(crypto_int64_s) : 
"r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + *crypto_int64_p = crypto_int64_r; + *crypto_int64_q = crypto_int64_s; +#else + crypto_int64 crypto_int64_r = crypto_int64_y ^ crypto_int64_x; + crypto_int64 crypto_int64_z = crypto_int64_y - crypto_int64_x; + crypto_int64_z ^= crypto_int64_r & (crypto_int64_z ^ crypto_int64_y); + crypto_int64_z = crypto_int64_negative_mask(crypto_int64_z); + crypto_int64_z &= crypto_int64_r; + crypto_int64_x ^= crypto_int64_z; + crypto_int64_y ^= crypto_int64_z; + *crypto_int64_p = crypto_int64_x; + *crypto_int64_q = crypto_int64_y; +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_smaller_mask(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $-1,%1\n cmpq %3,%2\n cmovlq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,%2\n csetm %0,lt" : "=r"(crypto_int64_z) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#else + crypto_int64 crypto_int64_r = crypto_int64_x ^ crypto_int64_y; + crypto_int64 crypto_int64_z = crypto_int64_x - crypto_int64_y; + crypto_int64_z ^= crypto_int64_r & (crypto_int64_z ^ crypto_int64_x); + return crypto_int64_negative_mask(crypto_int64_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_smaller_01(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $1,%1\n cmpq %3,%2\n cmovlq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + 
__asm__ ("cmp %1,%2\n cset %0,lt" : "=r"(crypto_int64_z) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#else + crypto_int64 crypto_int64_r = crypto_int64_x ^ crypto_int64_y; + crypto_int64 crypto_int64_z = crypto_int64_x - crypto_int64_y; + crypto_int64_z ^= crypto_int64_r & (crypto_int64_z ^ crypto_int64_x); + return crypto_int64_unsigned_topbit_01(crypto_int64_z); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_leq_mask(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $-1,%1\n cmpq %3,%2\n cmovleq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,%2\n csetm %0,le" : "=r"(crypto_int64_z) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#else + return ~crypto_int64_smaller_mask(crypto_int64_y,crypto_int64_x); +#endif +} + +__attribute__((unused)) +static inline +crypto_int64 crypto_int64_leq_01(crypto_int64 crypto_int64_x,crypto_int64 crypto_int64_y) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 crypto_int64_q,crypto_int64_z; + __asm__ ("xorq %0,%0\n movq $1,%1\n cmpq %3,%2\n cmovleq %1,%0" : "=&r"(crypto_int64_z), "=&r"(crypto_int64_q) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#elif defined(__GNUC__) && defined(__aarch64__) + crypto_int64 crypto_int64_z; + __asm__ ("cmp %1,%2\n cset %0,le" : "=r"(crypto_int64_z) : "r"(crypto_int64_x), "r"(crypto_int64_y) : "cc"); + return crypto_int64_z; +#else + return 1-crypto_int64_smaller_01(crypto_int64_y,crypto_int64_x); +#endif +} + +__attribute__((unused)) +static inline +int crypto_int64_ones_num(crypto_int64 crypto_int64_x) { + crypto_int64_unsigned crypto_int64_y = 
crypto_int64_x; + const crypto_int64 C0 = 0x5555555555555555; + const crypto_int64 C1 = 0x3333333333333333; + const crypto_int64 C2 = 0x0f0f0f0f0f0f0f0f; + crypto_int64_y -= ((crypto_int64_y >> 1) & C0); + crypto_int64_y = (crypto_int64_y & C1) + ((crypto_int64_y >> 2) & C1); + crypto_int64_y = (crypto_int64_y + (crypto_int64_y >> 4)) & C2; + crypto_int64_y += crypto_int64_y >> 8; + crypto_int64_y += crypto_int64_y >> 16; + crypto_int64_y = (crypto_int64_y + (crypto_int64_y >> 32)) & 0xff; + return crypto_int64_y; +} + +__attribute__((unused)) +static inline +int crypto_int64_bottomzeros_num(crypto_int64 crypto_int64_x) { +#if defined(__GNUC__) && defined(__x86_64__) + crypto_int64 fallback = 64; + __asm__ ("bsfq %0,%0\n cmoveq %1,%0" : "+&r"(crypto_int64_x) : "r"(fallback) : "cc"); + return crypto_int64_x; +#elif defined(__GNUC__) && defined(__aarch64__) + int64_t crypto_int64_y; + __asm__ ("rbit %0,%1\n clz %0,%0" : "=r"(crypto_int64_y) : "r"(crypto_int64_x) : ); + return crypto_int64_y; +#else + crypto_int64 crypto_int64_y = crypto_int64_x ^ (crypto_int64_x-1); + crypto_int64_y = ((crypto_int64) crypto_int64_y) >> 1; + crypto_int64_y &= ~(crypto_int64_x & (((crypto_int64) 1) << (64-1))); + return crypto_int64_ones_num(crypto_int64_y); +#endif +} + +#endif + +/* from supercop-20240808/crypto_sort/int32/portable4/sort.c */ +#define int32_MINMAX(a,b) crypto_int32_minmax(&a,&b) static void crypto_sort_int32(void *array,long long n) { @@ -93,7 +1670,7 @@ static void crypto_sort_int32(void *array,long long n) } } -/* from supercop-20201130/crypto_sort/uint32/useint32/sort.c */ +/* from supercop-20240808/crypto_sort/uint32/useint32/sort.c */ /* can save time by vectorizing xor loops */ /* can save time by integrating xor loops with int32_sort */ @@ -107,1163 +1684,470 @@ static void crypto_sort_uint32(void *array,long long n) for (j = 0;j < n;++j) x[j] ^= 0x80000000; } -/* from supercop-20201130/crypto_kem/sntrup761/ref/uint32.c */ +/* from 
supercop-20240808/crypto_kem/sntrup761/compact/kem.c */ +// 20240806 djb: some automated conversion to cryptoint -/* -CPU division instruction typically takes time depending on x. -This software is designed to take time independent of x. -Time still varies depending on m; user must ensure that m is constant. -Time also varies on CPUs where multiplication is variable-time. -There could be more CPU issues. -There could also be compiler issues. -*/ +#define p 761 +#define q 4591 +#define w 286 +#define q12 ((q - 1) / 2) +typedef int8_t small; +typedef int16_t Fq; +#define Hash_bytes 32 +#define Small_bytes ((p + 3) / 4) +typedef small Inputs[p]; +#define SecretKeys_bytes (2 * Small_bytes) +#define Confirm_bytes 32 -static void uint32_divmod_uint14(uint32 *q,uint16 *r,uint32 x,uint16 m) -{ - uint32 v = 0x80000000; - uint32 qpart; - uint32 mask; +static small F3_freeze(int16_t x) { return x - 3 * ((10923 * x + 16384) >> 15); } - v /= m; +static Fq Fq_freeze(int32_t x) { + const int32_t q16 = (0x10000 + q / 2) / q; + const int32_t q20 = (0x100000 + q / 2) / q; + const int32_t q28 = (0x10000000 + q / 2) / q; + x -= q * ((q16 * x) >> 16); + x -= q * ((q20 * x) >> 20); + return x - q * ((q28 * x + 0x8000000) >> 28); +} - /* caller guarantees m > 0 */ - /* caller guarantees m < 16384 */ - /* vm <= 2^31 <= vm+m-1 */ - /* xvm <= 2^31 x <= xvm+x(m-1) */ - - *q = 0; - - qpart = (x*(uint64)v)>>31; - /* 2^31 qpart <= xv <= 2^31 qpart + 2^31-1 */ - /* 2^31 qpart m <= xvm <= 2^31 qpart m + (2^31-1)m */ - /* 2^31 qpart m <= 2^31 x <= 2^31 qpart m + (2^31-1)m + x(m-1) */ - /* 0 <= 2^31 newx <= (2^31-1)m + x(m-1) */ - /* 0 <= newx <= (1-1/2^31)m + x(m-1)/2^31 */ - /* 0 <= newx <= (1-1/2^31)(2^14-1) + (2^32-1)((2^14-1)-1)/2^31 */ - - x -= qpart*m; *q += qpart; - /* x <= 49146 */ - - qpart = (x*(uint64)v)>>31; - /* 0 <= newx <= (1-1/2^31)m + x(m-1)/2^31 */ - /* 0 <= newx <= m + 49146(2^14-1)/2^31 */ - /* 0 <= newx <= m + 0.4 */ - /* 0 <= newx <= m */ - - x -= qpart*m; *q += qpart; - /* x 
<= m */ - - x -= m; *q += 1; - mask = -(x>>31); - x += mask&(uint32)m; *q += mask; - /* x < m */ +static int Weightw_mask(small *r) { + int i, weight = 0; + for (i = 0; i < p; ++i) weight += crypto_int64_bottombit_01(r[i]); + return crypto_int16_nonzero_mask(weight - w); +} +static void uint32_divmod_uint14(uint32_t *Q, uint16_t *r, uint32_t x, uint16_t m) { + uint32_t qpart, mask, v = 0x80000000 / m; + qpart = (x * (uint64_t)v) >> 31; + x -= qpart * m; + *Q = qpart; + qpart = (x * (uint64_t)v) >> 31; + x -= qpart * m; + *Q += qpart; + x -= m; + *Q += 1; + mask = crypto_int32_negative_mask(x); + x += mask & (uint32_t)m; + *Q += mask; *r = x; } - -static uint16 uint32_mod_uint14(uint32 x,uint16 m) -{ - uint32 q; - uint16 r; - uint32_divmod_uint14(&q,&r,x,m); +static uint16_t uint32_mod_uint14(uint32_t x, uint16_t m) { + uint32_t Q; + uint16_t r; + uint32_divmod_uint14(&Q, &r, x, m); return r; } -/* from supercop-20201130/crypto_kem/sntrup761/ref/int32.c */ - -static void int32_divmod_uint14(int32 *q,uint16 *r,int32 x,uint16 m) -{ - uint32 uq,uq2; - uint16 ur,ur2; - uint32 mask; - - uint32_divmod_uint14(&uq,&ur,0x80000000+(uint32)x,m); - uint32_divmod_uint14(&uq2,&ur2,0x80000000,m); - ur -= ur2; uq -= uq2; - mask = -(uint32)(ur>>15); - ur += mask&m; uq += mask; - *r = ur; *q = uq; +static void Encode(unsigned char *out, const uint16_t *R, const uint16_t *M, long long len) { + if (len == 1) { + uint16_t r = R[0], m = M[0]; + while (m > 1) { + *out++ = r; + r >>= 8; + m = (m + 255) >> 8; + } + } + if (len > 1) { + uint16_t R2[(len + 1) / 2], M2[(len + 1) / 2]; + long long i; + for (i = 0; i < len - 1; i += 2) { + uint32_t m0 = M[i]; + uint32_t r = R[i] + R[i + 1] * m0; + uint32_t m = M[i + 1] * m0; + while (m >= 16384) { + *out++ = r; + r >>= 8; + m = (m + 255) >> 8; + } + R2[i / 2] = r; + M2[i / 2] = m; + } + if (i < len) { + R2[i / 2] = R[i]; + M2[i / 2] = M[i]; + } + Encode(out, R2, M2, (len + 1) / 2); + } } - -static uint16 int32_mod_uint14(int32 x,uint16 m) -{ - 
int32 q; - uint16 r; - int32_divmod_uint14(&q,&r,x,m); - return r; -} - -/* from supercop-20201130/crypto_kem/sntrup761/ref/paramsmenu.h */ -/* pick one of these three: */ -#define SIZE761 -#undef SIZE653 -#undef SIZE857 - -/* pick one of these two: */ -#define SNTRUP /* Streamlined NTRU Prime */ -#undef LPR /* NTRU LPRime */ - -/* from supercop-20201130/crypto_kem/sntrup761/ref/params.h */ -#ifndef params_H -#define params_H - -/* menu of parameter choices: */ - - -/* what the menu means: */ - -#if defined(SIZE761) -#define p 761 -#define q 4591 -#define Rounded_bytes 1007 -#ifndef LPR -#define Rq_bytes 1158 -#define w 286 -#else -#define w 250 -#define tau0 2156 -#define tau1 114 -#define tau2 2007 -#define tau3 287 -#endif - -#elif defined(SIZE653) -#define p 653 -#define q 4621 -#define Rounded_bytes 865 -#ifndef LPR -#define Rq_bytes 994 -#define w 288 -#else -#define w 252 -#define tau0 2175 -#define tau1 113 -#define tau2 2031 -#define tau3 290 -#endif - -#elif defined(SIZE857) -#define p 857 -#define q 5167 -#define Rounded_bytes 1152 -#ifndef LPR -#define Rq_bytes 1322 -#define w 322 -#else -#define w 281 -#define tau0 2433 -#define tau1 101 -#define tau2 2265 -#define tau3 324 -#endif - -#else -#error "no parameter set defined" -#endif - -#ifdef LPR -#define I 256 -#endif - -#endif - -/* from supercop-20201130/crypto_kem/sntrup761/ref/Decode.h */ -#ifndef Decode_H -#define Decode_H - - -/* Decode(R,s,M,len) */ -/* assumes 0 < M[i] < 16384 */ -/* produces 0 <= R[i] < M[i] */ - -#endif - -/* from supercop-20201130/crypto_kem/sntrup761/ref/Decode.c */ - -static void Decode(uint16 *out,const unsigned char *S,const uint16 *M,long long len) -{ +static void Decode(uint16_t *out, const unsigned char *S, const uint16_t *M, long long len) { if (len == 1) { if (M[0] == 1) *out = 0; else if (M[0] <= 256) - *out = uint32_mod_uint14(S[0],M[0]); + *out = uint32_mod_uint14(S[0], M[0]); else - *out = uint32_mod_uint14(S[0]+(((uint16)S[1])<<8),M[0]); + *out = 
uint32_mod_uint14(S[0] + (((uint16_t)S[1]) << 8), M[0]); } if (len > 1) { - uint16 R2[(len+1)/2]; - uint16 M2[(len+1)/2]; - uint16 bottomr[len/2]; - uint32 bottomt[len/2]; + uint16_t R2[(len + 1) / 2], M2[(len + 1) / 2], bottomr[len / 2]; + uint32_t bottomt[len / 2]; long long i; - for (i = 0;i < len-1;i += 2) { - uint32 m = M[i]*(uint32) M[i+1]; - if (m > 256*16383) { - bottomt[i/2] = 256*256; - bottomr[i/2] = S[0]+256*S[1]; + for (i = 0; i < len - 1; i += 2) { + uint32_t m = M[i] * (uint32_t)M[i + 1]; + if (m > 256 * 16383) { + bottomt[i / 2] = 256 * 256; + bottomr[i / 2] = S[0] + 256 * S[1]; S += 2; - M2[i/2] = (((m+255)>>8)+255)>>8; + M2[i / 2] = (((m + 255) >> 8) + 255) >> 8; } else if (m >= 16384) { - bottomt[i/2] = 256; - bottomr[i/2] = S[0]; + bottomt[i / 2] = 256; + bottomr[i / 2] = S[0]; S += 1; - M2[i/2] = (m+255)>>8; + M2[i / 2] = (m + 255) >> 8; } else { - bottomt[i/2] = 1; - bottomr[i/2] = 0; - M2[i/2] = m; + bottomt[i / 2] = 1; + bottomr[i / 2] = 0; + M2[i / 2] = m; } } - if (i < len) - M2[i/2] = M[i]; - Decode(R2,S,M2,(len+1)/2); - for (i = 0;i < len-1;i += 2) { - uint32 r = bottomr[i/2]; - uint32 r1; - uint16 r0; - r += bottomt[i/2]*R2[i/2]; - uint32_divmod_uint14(&r1,&r0,r,M[i]); - r1 = uint32_mod_uint14(r1,M[i+1]); /* only needed for invalid inputs */ + if (i < len) M2[i / 2] = M[i]; + Decode(R2, S, M2, (len + 1) / 2); + for (i = 0; i < len - 1; i += 2) { + uint32_t r1, r = bottomr[i / 2]; + uint16_t r0; + r += bottomt[i / 2] * R2[i / 2]; + uint32_divmod_uint14(&r1, &r0, r, M[i]); + r1 = uint32_mod_uint14(r1, M[i + 1]); *out++ = r0; *out++ = r1; } - if (i < len) - *out++ = R2[i/2]; + if (i < len) *out++ = R2[i / 2]; } } -/* from supercop-20201130/crypto_kem/sntrup761/ref/Encode.h */ -#ifndef Encode_H -#define Encode_H +static void R3_fromRq(small *out, const Fq *r) { + int i; + for (i = 0; i < p; ++i) out[i] = F3_freeze(r[i]); +} +static void R3_mult(small *h, const small *f, const small *g) { + int16_t fg[p + p - 1]; + int i, j; + for (i = 0; i 
< p + p - 1; ++i) fg[i] = 0; + for (i = 0; i < p; ++i) + for (j = 0; j < p; ++j) fg[i + j] += f[i] * (int16_t)g[j]; + for (i = p; i < p + p - 1; ++i) fg[i - p] += fg[i]; + for (i = p; i < p + p - 1; ++i) fg[i - p + 1] += fg[i]; + for (i = 0; i < p; ++i) h[i] = F3_freeze(fg[i]); +} -/* Encode(s,R,M,len) */ -/* assumes 0 <= R[i] < M[i] < 16384 */ - -#endif - -/* from supercop-20201130/crypto_kem/sntrup761/ref/Encode.c */ - -/* 0 <= R[i] < M[i] < 16384 */ -static void Encode(unsigned char *out,const uint16 *R,const uint16 *M,long long len) -{ - if (len == 1) { - uint16 r = R[0]; - uint16 m = M[0]; - while (m > 1) { - *out++ = r; - r >>= 8; - m = (m+255)>>8; +static int R3_recip(small *out, const small *in) { + small f[p + 1], g[p + 1], v[p + 1], r[p + 1]; + int sign, swap, t, i, loop, delta = 1; + for (i = 0; i < p + 1; ++i) v[i] = 0; + for (i = 0; i < p + 1; ++i) r[i] = 0; + r[0] = 1; + for (i = 0; i < p; ++i) f[i] = 0; + f[0] = 1; + f[p - 1] = f[p] = -1; + for (i = 0; i < p; ++i) g[p - 1 - i] = in[i]; + g[p] = 0; + for (loop = 0; loop < 2 * p - 1; ++loop) { + for (i = p; i > 0; --i) v[i] = v[i - 1]; + v[0] = 0; + sign = -g[0] * f[0]; + swap = crypto_int16_negative_mask(-delta) & crypto_int16_nonzero_mask(g[0]); + delta ^= swap & (delta ^ -delta); + delta += 1; + for (i = 0; i < p + 1; ++i) { + t = swap & (f[i] ^ g[i]); + f[i] ^= t; + g[i] ^= t; + t = swap & (v[i] ^ r[i]); + v[i] ^= t; + r[i] ^= t; } + for (i = 0; i < p + 1; ++i) g[i] = F3_freeze(g[i] + sign * f[i]); + for (i = 0; i < p + 1; ++i) r[i] = F3_freeze(r[i] + sign * v[i]); + for (i = 0; i < p; ++i) g[i] = g[i + 1]; + g[p] = 0; } - if (len > 1) { - uint16 R2[(len+1)/2]; - uint16 M2[(len+1)/2]; - long long i; - for (i = 0;i < len-1;i += 2) { - uint32 m0 = M[i]; - uint32 r = R[i]+R[i+1]*m0; - uint32 m = M[i+1]*m0; - while (m >= 16384) { - *out++ = r; - r >>= 8; - m = (m+255)>>8; - } - R2[i/2] = r; - M2[i/2] = m; - } - if (i < len) { - R2[i/2] = R[i]; - M2[i/2] = M[i]; - } - Encode(out,R2,M2,(len+1)/2); - } + 
sign = f[0]; + for (i = 0; i < p; ++i) out[i] = sign * v[p - 1 - i]; + return crypto_int16_nonzero_mask(delta); } -/* from supercop-20201130/crypto_kem/sntrup761/ref/kem.c */ - -#ifdef LPR -#endif - - -/* ----- masks */ - -#ifndef LPR - -/* return -1 if x!=0; else return 0 */ -static int int16_nonzero_mask(int16 x) -{ - uint16 u = x; /* 0, else 1...65535 */ - uint32 v = u; /* 0, else 1...65535 */ - v = -v; /* 0, else 2^32-65535...2^32-1 */ - v >>= 31; /* 0, else 1 */ - return -v; /* 0, else -1 */ +static void Rq_mult_small(Fq *h, const Fq *f, const small *g) { + int32_t fg[p + p - 1]; + int i, j; + for (i = 0; i < p + p - 1; ++i) fg[i] = 0; + for (i = 0; i < p; ++i) + for (j = 0; j < p; ++j) fg[i + j] += f[i] * (int32_t)g[j]; + for (i = p; i < p + p - 1; ++i) fg[i - p] += fg[i]; + for (i = p; i < p + p - 1; ++i) fg[i - p + 1] += fg[i]; + for (i = 0; i < p; ++i) h[i] = Fq_freeze(fg[i]); } -#endif - -/* return -1 if x<0; otherwise return 0 */ -static int int16_negative_mask(int16 x) -{ - uint16 u = x; - u >>= 15; - return -(int) u; - /* alternative with gcc -fwrapv: */ - /* x>>15 compiles to CPU's arithmetic right shift */ +static void Rq_mult3(Fq *h, const Fq *f) { + int i; + for (i = 0; i < p; ++i) h[i] = Fq_freeze(3 * f[i]); } -/* ----- arithmetic mod 3 */ - -typedef int8 small; - -/* F3 is always represented as -1,0,1 */ -/* so ZZ_fromF3 is a no-op */ - -/* x must not be close to top int16 */ -static small F3_freeze(int16 x) -{ - return int32_mod_uint14(x+1,3)-1; -} - -/* ----- arithmetic mod q */ - -#define q12 ((q-1)/2) -typedef int16 Fq; -/* always represented as -q12...q12 */ -/* so ZZ_fromFq is a no-op */ - -/* x must not be close to top int32 */ -static Fq Fq_freeze(int32 x) -{ - return int32_mod_uint14(x+q12,q)-q12; -} - -#ifndef LPR - -static Fq Fq_recip(Fq a1) -{ +static Fq Fq_recip(Fq a1) { int i = 1; Fq ai = a1; - - while (i < q-2) { - ai = Fq_freeze(a1*(int32)ai); + while (i < q - 2) { + ai = Fq_freeze(a1 * (int32_t)ai); i += 1; } return ai; } -#endif 
- -/* ----- Top and Right */ - -#ifdef LPR -#define tau 16 - -static int8 Top(Fq C) -{ - return (tau1*(int32)(C+tau0)+16384)>>15; -} - -static Fq Right(int8 T) -{ - return Fq_freeze(tau3*(int32)T-tau2); -} -#endif - -/* ----- small polynomials */ - -#ifndef LPR - -/* 0 if Weightw_is(r), else -1 */ -static int Weightw_mask(small *r) -{ - int weight = 0; - int i; - - for (i = 0;i < p;++i) weight += r[i]&1; - return int16_nonzero_mask(weight-w); -} - -/* R3_fromR(R_fromRq(r)) */ -static void R3_fromRq(small *out,const Fq *r) -{ - int i; - for (i = 0;i < p;++i) out[i] = F3_freeze(r[i]); -} - -/* h = f*g in the ring R3 */ -static void R3_mult(small *h,const small *f,const small *g) -{ - small fg[p+p-1]; - small result; - int i,j; - - for (i = 0;i < p;++i) { - result = 0; - for (j = 0;j <= i;++j) result = F3_freeze(result+f[j]*g[i-j]); - fg[i] = result; - } - for (i = p;i < p+p-1;++i) { - result = 0; - for (j = i-p+1;j < p;++j) result = F3_freeze(result+f[j]*g[i-j]); - fg[i] = result; - } - - for (i = p+p-2;i >= p;--i) { - fg[i-p] = F3_freeze(fg[i-p]+fg[i]); - fg[i-p+1] = F3_freeze(fg[i-p+1]+fg[i]); - } - - for (i = 0;i < p;++i) h[i] = fg[i]; -} - -/* returns 0 if recip succeeded; else -1 */ -static int R3_recip(small *out,const small *in) -{ - small f[p+1],g[p+1],v[p+1],r[p+1]; - int i,loop,delta; - int sign,swap,t; - - for (i = 0;i < p+1;++i) v[i] = 0; - for (i = 0;i < p+1;++i) r[i] = 0; - r[0] = 1; - for (i = 0;i < p;++i) f[i] = 0; - f[0] = 1; f[p-1] = f[p] = -1; - for (i = 0;i < p;++i) g[p-1-i] = in[i]; - g[p] = 0; - - delta = 1; - - for (loop = 0;loop < 2*p-1;++loop) { - for (i = p;i > 0;--i) v[i] = v[i-1]; - v[0] = 0; - - sign = -g[0]*f[0]; - swap = int16_negative_mask(-delta) & int16_nonzero_mask(g[0]); - delta ^= swap&(delta^-delta); - delta += 1; - - for (i = 0;i < p+1;++i) { - t = swap&(f[i]^g[i]); f[i] ^= t; g[i] ^= t; - t = swap&(v[i]^r[i]); v[i] ^= t; r[i] ^= t; - } - - for (i = 0;i < p+1;++i) g[i] = F3_freeze(g[i]+sign*f[i]); - for (i = 0;i < p+1;++i) r[i] 
= F3_freeze(r[i]+sign*v[i]); - - for (i = 0;i < p;++i) g[i] = g[i+1]; - g[p] = 0; - } - - sign = f[0]; - for (i = 0;i < p;++i) out[i] = sign*v[p-1-i]; - - return int16_nonzero_mask(delta); -} - -#endif - -/* ----- polynomials mod q */ - -/* h = f*g in the ring Rq */ -static void Rq_mult_small(Fq *h,const Fq *f,const small *g) -{ - Fq fg[p+p-1]; - Fq result; - int i,j; - - for (i = 0;i < p;++i) { - result = 0; - for (j = 0;j <= i;++j) result = Fq_freeze(result+f[j]*(int32)g[i-j]); - fg[i] = result; - } - for (i = p;i < p+p-1;++i) { - result = 0; - for (j = i-p+1;j < p;++j) result = Fq_freeze(result+f[j]*(int32)g[i-j]); - fg[i] = result; - } - - for (i = p+p-2;i >= p;--i) { - fg[i-p] = Fq_freeze(fg[i-p]+fg[i]); - fg[i-p+1] = Fq_freeze(fg[i-p+1]+fg[i]); - } - - for (i = 0;i < p;++i) h[i] = fg[i]; -} - -#ifndef LPR - -/* h = 3f in Rq */ -static void Rq_mult3(Fq *h,const Fq *f) -{ - int i; - - for (i = 0;i < p;++i) h[i] = Fq_freeze(3*f[i]); -} - -/* out = 1/(3*in) in Rq */ -/* returns 0 if recip succeeded; else -1 */ -static int Rq_recip3(Fq *out,const small *in) -{ - Fq f[p+1],g[p+1],v[p+1],r[p+1]; - int i,loop,delta; - int swap,t; - int32 f0,g0; - Fq scale; - - for (i = 0;i < p+1;++i) v[i] = 0; - for (i = 0;i < p+1;++i) r[i] = 0; +static int Rq_recip3(Fq *out, const small *in) { + Fq f[p + 1], g[p + 1], v[p + 1], r[p + 1], scale; + int swap, t, i, loop, delta = 1; + int32_t f0, g0; + for (i = 0; i < p + 1; ++i) v[i] = 0; + for (i = 0; i < p + 1; ++i) r[i] = 0; r[0] = Fq_recip(3); - for (i = 0;i < p;++i) f[i] = 0; - f[0] = 1; f[p-1] = f[p] = -1; - for (i = 0;i < p;++i) g[p-1-i] = in[i]; + for (i = 0; i < p; ++i) f[i] = 0; + f[0] = 1; + f[p - 1] = f[p] = -1; + for (i = 0; i < p; ++i) g[p - 1 - i] = in[i]; g[p] = 0; - - delta = 1; - - for (loop = 0;loop < 2*p-1;++loop) { - for (i = p;i > 0;--i) v[i] = v[i-1]; + for (loop = 0; loop < 2 * p - 1; ++loop) { + for (i = p; i > 0; --i) v[i] = v[i - 1]; v[0] = 0; - - swap = int16_negative_mask(-delta) & int16_nonzero_mask(g[0]); 
- delta ^= swap&(delta^-delta); + swap = crypto_int16_negative_mask(-delta) & crypto_int16_nonzero_mask(g[0]); + delta ^= swap & (delta ^ -delta); delta += 1; - - for (i = 0;i < p+1;++i) { - t = swap&(f[i]^g[i]); f[i] ^= t; g[i] ^= t; - t = swap&(v[i]^r[i]); v[i] ^= t; r[i] ^= t; + for (i = 0; i < p + 1; ++i) { + t = swap & (f[i] ^ g[i]); + f[i] ^= t; + g[i] ^= t; + t = swap & (v[i] ^ r[i]); + v[i] ^= t; + r[i] ^= t; } - f0 = f[0]; g0 = g[0]; - for (i = 0;i < p+1;++i) g[i] = Fq_freeze(f0*g[i]-g0*f[i]); - for (i = 0;i < p+1;++i) r[i] = Fq_freeze(f0*r[i]-g0*v[i]); - - for (i = 0;i < p;++i) g[i] = g[i+1]; + for (i = 0; i < p + 1; ++i) g[i] = Fq_freeze(f0 * g[i] - g0 * f[i]); + for (i = 0; i < p + 1; ++i) r[i] = Fq_freeze(f0 * r[i] - g0 * v[i]); + for (i = 0; i < p; ++i) g[i] = g[i + 1]; g[p] = 0; } - scale = Fq_recip(f[0]); - for (i = 0;i < p;++i) out[i] = Fq_freeze(scale*(int32)v[p-1-i]); - - return int16_nonzero_mask(delta); + for (i = 0; i < p; ++i) out[i] = Fq_freeze(scale * (int32_t)v[p - 1 - i]); + return crypto_int16_nonzero_mask(delta); } -#endif - -/* ----- rounded polynomials mod q */ - -static void Round(Fq *out,const Fq *a) -{ +static void Round(Fq *out, const Fq *a) { int i; - for (i = 0;i < p;++i) out[i] = a[i]-F3_freeze(a[i]); + for (i = 0; i < p; ++i) out[i] = a[i] - F3_freeze(a[i]); } -/* ----- sorting to generate short polynomial */ - -static void Short_fromlist(small *out,const uint32 *in) -{ - uint32 L[p]; +static void Short_fromlist(small *out, const uint32_t *in) { + uint32_t L[p]; int i; - - for (i = 0;i < w;++i) L[i] = in[i]&(uint32)-2; - for (i = w;i < p;++i) L[i] = (in[i]&(uint32)-3)|1; - crypto_sort_uint32(L,p); - for (i = 0;i < p;++i) out[i] = (L[i]&3)-1; + for (i = 0; i < w; ++i) L[i] = in[i] & (uint32_t)-2; + for (i = w; i < p; ++i) L[i] = (in[i] & (uint32_t)-3) | 1; + crypto_sort_uint32(L, p); + for (i = 0; i < p; ++i) out[i] = (L[i] & 3) - 1; } -/* ----- underlying hash function */ - -#define Hash_bytes 32 - -/* e.g., b = 0 means out = 
Hash0(in) */ -static void Hash_prefix(unsigned char *out,int b,const unsigned char *in,int inlen) -{ - unsigned char x[inlen+1]; - unsigned char h[64]; +static void Hash_prefix(unsigned char *out, int b, const unsigned char *in, int inlen) { + unsigned char x[inlen + 1], h[64]; int i; - x[0] = b; - for (i = 0;i < inlen;++i) x[i+1] = in[i]; - crypto_hash_sha512(h,x,inlen+1); - for (i = 0;i < 32;++i) out[i] = h[i]; + for (i = 0; i < inlen; ++i) x[i + 1] = in[i]; + crypto_hash_sha512(h, x, inlen + 1); + for (i = 0; i < 32; ++i) out[i] = h[i]; } -/* ----- higher-level randomness */ - -static uint32 urandom32(void) -{ +static uint32_t urandom32(void) { unsigned char c[4]; - uint32 out[4]; - - randombytes(c,4); - out[0] = (uint32)c[0]; - out[1] = ((uint32)c[1])<<8; - out[2] = ((uint32)c[2])<<16; - out[3] = ((uint32)c[3])<<24; - return out[0]+out[1]+out[2]+out[3]; -} - -static void Short_random(small *out) -{ - uint32 L[p]; + uint32_t result = 0; int i; - - for (i = 0;i < p;++i) L[i] = urandom32(); - Short_fromlist(out,L); + randombytes(c, 4); + for (i = 0; i < 4; ++i) result += ((uint32_t)c[i]) << (8 * i); + return result; } -#ifndef LPR - -static void Small_random(small *out) -{ +static void Short_random(small *out) { + uint32_t L[p]; int i; - - for (i = 0;i < p;++i) out[i] = (((urandom32()&0x3fffffff)*3)>>30)-1; + for (i = 0; i < p; ++i) L[i] = urandom32(); + Short_fromlist(out, L); } -#endif +static void Small_random(small *out) { + int i; + for (i = 0; i < p; ++i) out[i] = (((urandom32() & 0x3fffffff) * 3) >> 30) - 1; +} -/* ----- Streamlined NTRU Prime Core */ - -#ifndef LPR - -/* h,(f,ginv) = KeyGen() */ -static void KeyGen(Fq *h,small *f,small *ginv) -{ +static void KeyGen(Fq *h, small *f, small *ginv) { small g[p]; Fq finv[p]; - for (;;) { + int result; Small_random(g); - if (R3_recip(ginv,g) == 0) break; + result = R3_recip(ginv, g); + crypto_declassify(&result, sizeof result); + if (result == 0) break; } Short_random(f); - Rq_recip3(finv,f); /* always works */ 
- Rq_mult_small(h,finv,g); + Rq_recip3(finv, f); + Rq_mult_small(h, finv, g); } -/* c = Encrypt(r,h) */ -static void Encrypt(Fq *c,const small *r,const Fq *h) -{ +static void Encrypt(Fq *c, const small *r, const Fq *h) { Fq hr[p]; - - Rq_mult_small(hr,h,r); - Round(c,hr); + Rq_mult_small(hr, h, r); + Round(c, hr); } -/* r = Decrypt(c,(f,ginv)) */ -static void Decrypt(small *r,const Fq *c,const small *f,const small *ginv) -{ - Fq cf[p]; - Fq cf3[p]; - small e[p]; - small ev[p]; - int mask; - int i; - - Rq_mult_small(cf,c,f); - Rq_mult3(cf3,cf); - R3_fromRq(e,cf3); - R3_mult(ev,e,ginv); - - mask = Weightw_mask(ev); /* 0 if weight w, else -1 */ - for (i = 0;i < w;++i) r[i] = ((ev[i]^1)&~mask)^1; - for (i = w;i < p;++i) r[i] = ev[i]&~mask; +static void Decrypt(small *r, const Fq *c, const small *f, const small *ginv) { + Fq cf[p], cf3[p]; + small e[p], ev[p]; + int mask, i; + Rq_mult_small(cf, c, f); + Rq_mult3(cf3, cf); + R3_fromRq(e, cf3); + R3_mult(ev, e, ginv); + mask = Weightw_mask(ev); + for (i = 0; i < w; ++i) r[i] = ((ev[i] ^ 1) & ~mask) ^ 1; + for (i = w; i < p; ++i) r[i] = ev[i] & ~mask; } -#endif - -/* ----- NTRU LPRime Core */ - -#ifdef LPR - -/* (G,A),a = KeyGen(G); leaves G unchanged */ -static void KeyGen(Fq *A,small *a,const Fq *G) -{ - Fq aG[p]; - - Short_random(a); - Rq_mult_small(aG,G,a); - Round(A,aG); -} - -/* B,T = Encrypt(r,(G,A),b) */ -static void Encrypt(Fq *B,int8 *T,const int8 *r,const Fq *G,const Fq *A,const small *b) -{ - Fq bG[p]; - Fq bA[p]; - int i; - - Rq_mult_small(bG,G,b); - Round(B,bG); - Rq_mult_small(bA,A,b); - for (i = 0;i < I;++i) T[i] = Top(Fq_freeze(bA[i]+r[i]*q12)); -} - -/* r = Decrypt((B,T),a) */ -static void Decrypt(int8 *r,const Fq *B,const int8 *T,const small *a) -{ - Fq aB[p]; - int i; - - Rq_mult_small(aB,B,a); - for (i = 0;i < I;++i) - r[i] = -int16_negative_mask(Fq_freeze(Right(T[i])-aB[i]+4*w+1)); -} - -#endif - -/* ----- encoding I-bit inputs */ - -#ifdef LPR - -#define Inputs_bytes (I/8) -typedef int8 Inputs[I]; /* 
passed by reference */ - -static void Inputs_encode(unsigned char *s,const Inputs r) -{ - int i; - for (i = 0;i < Inputs_bytes;++i) s[i] = 0; - for (i = 0;i < I;++i) s[i>>3] |= r[i]<<(i&7); -} - -#endif - -/* ----- Expand */ - -#ifdef LPR - -static const unsigned char aes_nonce[16] = {0}; - -static void Expand(uint32 *L,const unsigned char *k) -{ - int i; - crypto_stream_aes256ctr((unsigned char *) L,4*p,aes_nonce,k); - for (i = 0;i < p;++i) { - uint32 L0 = ((unsigned char *) L)[4*i]; - uint32 L1 = ((unsigned char *) L)[4*i+1]; - uint32 L2 = ((unsigned char *) L)[4*i+2]; - uint32 L3 = ((unsigned char *) L)[4*i+3]; - L[i] = L0+(L1<<8)+(L2<<16)+(L3<<24); - } -} - -#endif - -/* ----- Seeds */ - -#ifdef LPR - -#define Seeds_bytes 32 - -static void Seeds_random(unsigned char *s) -{ - randombytes(s,Seeds_bytes); -} - -#endif - -/* ----- Generator, HashShort */ - -#ifdef LPR - -/* G = Generator(k) */ -static void Generator(Fq *G,const unsigned char *k) -{ - uint32 L[p]; - int i; - - Expand(L,k); - for (i = 0;i < p;++i) G[i] = uint32_mod_uint14(L[i],q)-q12; -} - -/* out = HashShort(r) */ -static void HashShort(small *out,const Inputs r) -{ - unsigned char s[Inputs_bytes]; - unsigned char h[Hash_bytes]; - uint32 L[p]; - - Inputs_encode(s,r); - Hash_prefix(h,5,s,sizeof s); - Expand(L,h); - Short_fromlist(out,L); -} - -#endif - -/* ----- NTRU LPRime Expand */ - -#ifdef LPR - -/* (S,A),a = XKeyGen() */ -static void XKeyGen(unsigned char *S,Fq *A,small *a) -{ - Fq G[p]; - - Seeds_random(S); - Generator(G,S); - KeyGen(A,a,G); -} - -/* B,T = XEncrypt(r,(S,A)) */ -static void XEncrypt(Fq *B,int8 *T,const int8 *r,const unsigned char *S,const Fq *A) -{ - Fq G[p]; - small b[p]; - - Generator(G,S); - HashShort(b,r); - Encrypt(B,T,r,G,A,b); -} - -#define XDecrypt Decrypt - -#endif - -/* ----- encoding small polynomials (including short polynomials) */ - -#define Small_bytes ((p+3)/4) - -/* these are the only functions that rely on p mod 4 = 1 */ - -static void Small_encode(unsigned 
char *s,const small *f) -{ - small x; - int i; - - for (i = 0;i < p/4;++i) { - x = *f++ + 1; - x += (*f++ + 1)<<2; - x += (*f++ + 1)<<4; - x += (*f++ + 1)<<6; +static void Small_encode(unsigned char *s, const small *f) { + int i, j; + for (i = 0; i < p / 4; ++i) { + small x = 0; + for (j = 0;j < 4;++j) x += (*f++ + 1) << (2 * j); *s++ = x; } - x = *f++ + 1; - *s++ = x; + *s = *f++ + 1; } -static void Small_decode(small *f,const unsigned char *s) -{ - unsigned char x; - int i; - - for (i = 0;i < p/4;++i) { - x = *s++; - *f++ = ((small)(x&3))-1; x >>= 2; - *f++ = ((small)(x&3))-1; x >>= 2; - *f++ = ((small)(x&3))-1; x >>= 2; - *f++ = ((small)(x&3))-1; +static void Small_decode(small *f, const unsigned char *s) { + int i, j; + for (i = 0; i < p / 4; ++i) { + unsigned char x = *s++; + for (j = 0;j < 4;++j) *f++ = ((small)((x >> (2 * j)) & 3)) - 1; } - x = *s++; - *f++ = ((small)(x&3))-1; + *f++ = ((small)(*s & 3)) - 1; } -/* ----- encoding general polynomials */ - -#ifndef LPR - -static void Rq_encode(unsigned char *s,const Fq *r) -{ - uint16 R[p],M[p]; +static void Rq_encode(unsigned char *s, const Fq *r) { + uint16_t R[p], M[p]; int i; - - for (i = 0;i < p;++i) R[i] = r[i]+q12; - for (i = 0;i < p;++i) M[i] = q; - Encode(s,R,M,p); + for (i = 0; i < p; ++i) R[i] = r[i] + q12; + for (i = 0; i < p; ++i) M[i] = q; + Encode(s, R, M, p); } -static void Rq_decode(Fq *r,const unsigned char *s) -{ - uint16 R[p],M[p]; +static void Rq_decode(Fq *r, const unsigned char *s) { + uint16_t R[p], M[p]; int i; - - for (i = 0;i < p;++i) M[i] = q; - Decode(R,s,M,p); - for (i = 0;i < p;++i) r[i] = ((Fq)R[i])-q12; + for (i = 0; i < p; ++i) M[i] = q; + Decode(R, s, M, p); + for (i = 0; i < p; ++i) r[i] = ((Fq)R[i]) - q12; } -#endif - -/* ----- encoding rounded polynomials */ - -static void Rounded_encode(unsigned char *s,const Fq *r) -{ - uint16 R[p],M[p]; +static void Rounded_encode(unsigned char *s, const Fq *r) { + uint16_t R[p], M[p]; int i; - - for (i = 0;i < p;++i) R[i] = 
((r[i]+q12)*10923)>>15; - for (i = 0;i < p;++i) M[i] = (q+2)/3; - Encode(s,R,M,p); + for (i = 0; i < p; ++i) R[i] = ((r[i] + q12) * 10923) >> 15; + for (i = 0; i < p; ++i) M[i] = (q + 2) / 3; + Encode(s, R, M, p); } -static void Rounded_decode(Fq *r,const unsigned char *s) -{ - uint16 R[p],M[p]; +static void Rounded_decode(Fq *r, const unsigned char *s) { + uint16_t R[p], M[p]; int i; - - for (i = 0;i < p;++i) M[i] = (q+2)/3; - Decode(R,s,M,p); - for (i = 0;i < p;++i) r[i] = R[i]*3-q12; + for (i = 0; i < p; ++i) M[i] = (q + 2) / 3; + Decode(R, s, M, p); + for (i = 0; i < p; ++i) r[i] = R[i] * 3 - q12; } -/* ----- encoding top polynomials */ - -#ifdef LPR - -#define Top_bytes (I/2) - -static void Top_encode(unsigned char *s,const int8 *T) -{ - int i; - for (i = 0;i < Top_bytes;++i) - s[i] = T[2*i]+(T[2*i+1]<<4); -} - -static void Top_decode(int8 *T,const unsigned char *s) -{ - int i; - for (i = 0;i < Top_bytes;++i) { - T[2*i] = s[i]&15; - T[2*i+1] = s[i]>>4; - } -} - -#endif - -/* ----- Streamlined NTRU Prime Core plus encoding */ - -#ifndef LPR - -typedef small Inputs[p]; /* passed by reference */ -#define Inputs_random Short_random -#define Inputs_encode Small_encode -#define Inputs_bytes Small_bytes - -#define Ciphertexts_bytes Rounded_bytes -#define SecretKeys_bytes (2*Small_bytes) -#define PublicKeys_bytes Rq_bytes - -/* pk,sk = ZKeyGen() */ -static void ZKeyGen(unsigned char *pk,unsigned char *sk) -{ +static void ZKeyGen(unsigned char *pk, unsigned char *sk) { Fq h[p]; - small f[p],v[p]; - - KeyGen(h,f,v); - Rq_encode(pk,h); - Small_encode(sk,f); sk += Small_bytes; - Small_encode(sk,v); + small f[p], v[p]; + KeyGen(h, f, v); + Rq_encode(pk, h); + Small_encode(sk, f); + Small_encode(sk + Small_bytes, v); } -/* C = ZEncrypt(r,pk) */ -static void ZEncrypt(unsigned char *C,const Inputs r,const unsigned char *pk) -{ - Fq h[p]; +static void ZEncrypt(unsigned char *C, const Inputs r, const unsigned char *pk) { + Fq h[p], c[p]; + Rq_decode(h, pk); + Encrypt(c, r, h); 
+ Rounded_encode(C, c); +} + +static void ZDecrypt(Inputs r, const unsigned char *C, const unsigned char *sk) { + small f[p], v[p]; Fq c[p]; - Rq_decode(h,pk); - Encrypt(c,r,h); - Rounded_encode(C,c); + Small_decode(f, sk); + Small_decode(v, sk + Small_bytes); + Rounded_decode(c, C); + Decrypt(r, c, f, v); } -/* r = ZDecrypt(C,sk) */ -static void ZDecrypt(Inputs r,const unsigned char *C,const unsigned char *sk) -{ - small f[p],v[p]; - Fq c[p]; - - Small_decode(f,sk); sk += Small_bytes; - Small_decode(v,sk); - Rounded_decode(c,C); - Decrypt(r,c,f,v); -} - -#endif - -/* ----- NTRU LPRime Expand plus encoding */ - -#ifdef LPR - -#define Ciphertexts_bytes (Rounded_bytes+Top_bytes) -#define SecretKeys_bytes Small_bytes -#define PublicKeys_bytes (Seeds_bytes+Rounded_bytes) - -static void Inputs_random(Inputs r) -{ - unsigned char s[Inputs_bytes]; +static void HashConfirm(unsigned char *h, const unsigned char *r, const unsigned char *cache) { + unsigned char x[Hash_bytes * 2]; int i; - - randombytes(s,sizeof s); - for (i = 0;i < I;++i) r[i] = 1&(s[i>>3]>>(i&7)); + Hash_prefix(x, 3, r, Small_bytes); + for (i = 0; i < Hash_bytes; ++i) x[Hash_bytes + i] = cache[i]; + Hash_prefix(h, 2, x, sizeof x); } -/* pk,sk = ZKeyGen() */ -static void ZKeyGen(unsigned char *pk,unsigned char *sk) -{ - Fq A[p]; - small a[p]; - - XKeyGen(pk,A,a); pk += Seeds_bytes; - Rounded_encode(pk,A); - Small_encode(sk,a); -} - -/* c = ZEncrypt(r,pk) */ -static void ZEncrypt(unsigned char *c,const Inputs r,const unsigned char *pk) -{ - Fq A[p]; - Fq B[p]; - int8 T[I]; - - Rounded_decode(A,pk+Seeds_bytes); - XEncrypt(B,T,r,pk,A); - Rounded_encode(c,B); c += Rounded_bytes; - Top_encode(c,T); -} - -/* r = ZDecrypt(C,sk) */ -static void ZDecrypt(Inputs r,const unsigned char *c,const unsigned char *sk) -{ - small a[p]; - Fq B[p]; - int8 T[I]; - - Small_decode(a,sk); - Rounded_decode(B,c); - Top_decode(T,c+Rounded_bytes); - XDecrypt(r,B,T,a); -} - -#endif - -/* ----- confirmation hash */ - -#define 
Confirm_bytes 32 - -/* h = HashConfirm(r,pk,cache); cache is Hash4(pk) */ -static void HashConfirm(unsigned char *h,const unsigned char *r,const unsigned char *pk,const unsigned char *cache) -{ -#ifndef LPR - unsigned char x[Hash_bytes*2]; +static void HashSession(unsigned char *k, int b, const unsigned char *y, const unsigned char *z) { + unsigned char x[Hash_bytes + crypto_kem_sntrup761_CIPHERTEXTBYTES]; int i; - - Hash_prefix(x,3,r,Inputs_bytes); - for (i = 0;i < Hash_bytes;++i) x[Hash_bytes+i] = cache[i]; -#else - unsigned char x[Inputs_bytes+Hash_bytes]; - int i; - - for (i = 0;i < Inputs_bytes;++i) x[i] = r[i]; - for (i = 0;i < Hash_bytes;++i) x[Inputs_bytes+i] = cache[i]; -#endif - Hash_prefix(h,2,x,sizeof x); + Hash_prefix(x, 3, y, Small_bytes); + for (i = 0; i < crypto_kem_sntrup761_CIPHERTEXTBYTES; ++i) x[Hash_bytes + i] = z[i]; + Hash_prefix(k, b, x, sizeof x); } -/* ----- session-key hash */ - -/* k = HashSession(b,y,z) */ -static void HashSession(unsigned char *k,int b,const unsigned char *y,const unsigned char *z) -{ -#ifndef LPR - unsigned char x[Hash_bytes+Ciphertexts_bytes+Confirm_bytes]; +int crypto_kem_sntrup761_keypair(unsigned char *pk, unsigned char *sk) { int i; - - Hash_prefix(x,3,y,Inputs_bytes); - for (i = 0;i < Ciphertexts_bytes+Confirm_bytes;++i) x[Hash_bytes+i] = z[i]; -#else - unsigned char x[Inputs_bytes+Ciphertexts_bytes+Confirm_bytes]; - int i; - - for (i = 0;i < Inputs_bytes;++i) x[i] = y[i]; - for (i = 0;i < Ciphertexts_bytes+Confirm_bytes;++i) x[Inputs_bytes+i] = z[i]; -#endif - Hash_prefix(k,b,x,sizeof x); + ZKeyGen(pk, sk); + sk += SecretKeys_bytes; + for (i = 0; i < crypto_kem_sntrup761_PUBLICKEYBYTES; ++i) *sk++ = pk[i]; + randombytes(sk, Small_bytes); + Hash_prefix(sk + Small_bytes, 4, pk, crypto_kem_sntrup761_PUBLICKEYBYTES); + return 0; } -/* ----- Streamlined NTRU Prime and NTRU LPRime */ - -/* pk,sk = KEM_KeyGen() */ -static void KEM_KeyGen(unsigned char *pk,unsigned char *sk) -{ - int i; - - ZKeyGen(pk,sk); sk += 
SecretKeys_bytes; - for (i = 0;i < PublicKeys_bytes;++i) *sk++ = pk[i]; - randombytes(sk,Inputs_bytes); sk += Inputs_bytes; - Hash_prefix(sk,4,pk,PublicKeys_bytes); +static void Hide(unsigned char *c, unsigned char *r_enc, const Inputs r, const unsigned char *pk, const unsigned char *cache) { + Small_encode(r_enc, r); + ZEncrypt(c, r, pk); + HashConfirm(c + crypto_kem_sntrup761_CIPHERTEXTBYTES - Confirm_bytes, r_enc, cache); } -/* c,r_enc = Hide(r,pk,cache); cache is Hash4(pk) */ -static void Hide(unsigned char *c,unsigned char *r_enc,const Inputs r,const unsigned char *pk,const unsigned char *cache) -{ - Inputs_encode(r_enc,r); - ZEncrypt(c,r,pk); c += Ciphertexts_bytes; - HashConfirm(c,r_enc,pk,cache); -} - -/* c,k = Encap(pk) */ -static void Encap(unsigned char *c,unsigned char *k,const unsigned char *pk) -{ +int crypto_kem_sntrup761_enc(unsigned char *c, unsigned char *k, const unsigned char *pk) { Inputs r; - unsigned char r_enc[Inputs_bytes]; - unsigned char cache[Hash_bytes]; - - Hash_prefix(cache,4,pk,PublicKeys_bytes); - Inputs_random(r); - Hide(c,r_enc,r,pk,cache); - HashSession(k,1,r_enc,c); + unsigned char r_enc[Small_bytes], cache[Hash_bytes]; + Hash_prefix(cache, 4, pk, crypto_kem_sntrup761_PUBLICKEYBYTES); + Short_random(r); + Hide(c, r_enc, r, pk, cache); + HashSession(k, 1, r_enc, c); + return 0; } -/* 0 if matching ciphertext+confirm, else -1 */ -static int Ciphertexts_diff_mask(const unsigned char *c,const unsigned char *c2) -{ - uint16 differentbits = 0; - int len = Ciphertexts_bytes+Confirm_bytes; - - while (len-- > 0) differentbits |= (*c++)^(*c2++); - return (1&((differentbits-1)>>8))-1; +static int Ciphertexts_diff_mask(const unsigned char *c, const unsigned char *c2) { + uint16_t differentbits = 0; + int len = crypto_kem_sntrup761_CIPHERTEXTBYTES; + while (len-- > 0) differentbits |= (*c++) ^ (*c2++); + return (crypto_int64_bitmod_01((differentbits - 1),8)) - 1; } -/* k = Decap(c,sk) */ -static void Decap(unsigned char *k,const unsigned 
char *c,const unsigned char *sk) -{ +int crypto_kem_sntrup761_dec(unsigned char *k, const unsigned char *c, const unsigned char *sk) { const unsigned char *pk = sk + SecretKeys_bytes; - const unsigned char *rho = pk + PublicKeys_bytes; - const unsigned char *cache = rho + Inputs_bytes; + const unsigned char *rho = pk + crypto_kem_sntrup761_PUBLICKEYBYTES; + const unsigned char *cache = rho + Small_bytes; Inputs r; - unsigned char r_enc[Inputs_bytes]; - unsigned char cnew[Ciphertexts_bytes+Confirm_bytes]; - int mask; - int i; - - ZDecrypt(r,c,sk); - Hide(cnew,r_enc,r,pk,cache); - mask = Ciphertexts_diff_mask(c,cnew); - for (i = 0;i < Inputs_bytes;++i) r_enc[i] ^= mask&(r_enc[i]^rho[i]); - HashSession(k,1+mask,r_enc,c); -} - -/* ----- crypto_kem API */ - - -int crypto_kem_sntrup761_keypair(unsigned char *pk,unsigned char *sk) -{ - KEM_KeyGen(pk,sk); - return 0; -} - -int crypto_kem_sntrup761_enc(unsigned char *c,unsigned char *k,const unsigned char *pk) -{ - Encap(c,k,pk); - return 0; -} - -int crypto_kem_sntrup761_dec(unsigned char *k,const unsigned char *c,const unsigned char *sk) -{ - Decap(k,c,sk); + unsigned char r_enc[Small_bytes], cnew[crypto_kem_sntrup761_CIPHERTEXTBYTES]; + int mask, i; + ZDecrypt(r, c, sk); + Hide(cnew, r_enc, r, pk, cache); + mask = Ciphertexts_diff_mask(c, cnew); + for (i = 0; i < Small_bytes; ++i) r_enc[i] ^= mask & (r_enc[i] ^ rho[i]); + HashSession(k, 1 + mask, r_enc, c); return 0; } diff --git a/usr.bin/ssh/sntrup761.sh b/usr.bin/ssh/sntrup761.sh index db4e9aed0..92c803bb1 100644 --- a/usr.bin/ssh/sntrup761.sh +++ b/usr.bin/ssh/sntrup761.sh @@ -1,25 +1,18 @@ #!/bin/sh -# $OpenBSD: sntrup761.sh,v 1.7 2023/01/11 02:13:52 djm Exp $ +# $OpenBSD: sntrup761.sh,v 1.8 2024/09/15 02:20:51 djm Exp $ # Placed in the Public Domain. 
# -AUTHOR="supercop-20201130/crypto_kem/sntrup761/ref/implementors" -FILES=" - supercop-20201130/crypto_sort/int32/portable4/int32_minmax.inc - supercop-20201130/crypto_sort/int32/portable4/sort.c - supercop-20201130/crypto_sort/uint32/useint32/sort.c - supercop-20201130/crypto_kem/sntrup761/ref/uint32.c - supercop-20201130/crypto_kem/sntrup761/ref/int32.c - supercop-20201130/crypto_kem/sntrup761/ref/paramsmenu.h - supercop-20201130/crypto_kem/sntrup761/ref/params.h - supercop-20201130/crypto_kem/sntrup761/ref/Decode.h - supercop-20201130/crypto_kem/sntrup761/ref/Decode.c - supercop-20201130/crypto_kem/sntrup761/ref/Encode.h - supercop-20201130/crypto_kem/sntrup761/ref/Encode.c - supercop-20201130/crypto_kem/sntrup761/ref/kem.c +AUTHOR="supercop-20240808/crypto_kem/sntrup761/ref/implementors" +FILES=" supercop-20240808/cryptoint/crypto_int16.h + supercop-20240808/cryptoint/crypto_int32.h + supercop-20240808/cryptoint/crypto_int64.h + supercop-20240808/crypto_sort/int32/portable4/sort.c + supercop-20240808/crypto_sort/uint32/useint32/sort.c + supercop-20240808/crypto_kem/sntrup761/compact/kem.c " ### -set -e +set -euo pipefail cd $1 echo -n '/* $' echo 'OpenBSD: $ */' @@ -32,12 +25,19 @@ echo echo '#include ' echo '#include "crypto_api.h"' echo +echo '#define crypto_declassify(x, y) do {} while (0)' +echo # Map the types used in this code to the ones in crypto_api.h. We use #define # instead of typedef since some systems have existing intXX types and do not # permit multiple typedefs even if they do not conflict. for t in int8 uint8 int16 uint16 int32 uint32 int64 uint64; do echo "#define $t crypto_${t}" done + +for x in 16 32 64 ; do + echo "extern volatile crypto_int$x crypto_int${x}_optblocker;" +done + echo for i in $FILES; do echo "/* from $i */" @@ -57,14 +57,27 @@ for i in $FILES; do -e 's/[ ]*$//' \ $i | \ case "$i" in - # Use int64_t for intermediate values in int32_MINMAX to prevent signed - # 32-bit integer overflow when called by crypto_sort_uint32. 
- */int32_minmax.inc) - sed -e "s/int32 ab = b ^ a/int64_t ab = (int64_t)b ^ (int64_t)a/" \ - -e "s/int32 c = b - a/int64_t c = (int64_t)b - (int64_t)a/" + */cryptoint/crypto_int16.h) + sed -e "s/static void crypto_int16_store/void crypto_int16_store/" \ + -e "s/^[#]define crypto_int16_optblocker.*//" \ + -e "s/static void crypto_int16_minmax/void crypto_int16_minmax/" + ;; + */cryptoint/crypto_int32.h) + sed -e "s/static void crypto_int32_store/void crypto_int32_store/" \ + -e "s/^[#]define crypto_int32_optblocker.*//" \ + -e "s/static void crypto_int32_minmax/void crypto_int32_minmax/" + ;; + */cryptoint/crypto_int64.h) + sed -e "s/static void crypto_int64_store/void crypto_int64_store/" \ + -e "s/^[#]define crypto_int64_optblocker.*//" \ + -e "s/static void crypto_int64_minmax/void crypto_int64_minmax/" ;; */int32/portable4/sort.c) - sed -e "s/void crypto_sort/void crypto_sort_int32/g" + sed -e "s/void crypto_sort[(]/void crypto_sort_int32(/g" + ;; + */int32/portable5/sort.c) + sed -e "s/crypto_sort_smallindices/crypto_sort_int32_smallindices/"\ + -e "s/void crypto_sort[(]/void crypto_sort_int32(/g" ;; */uint32/useint32/sort.c) sed -e "s/void crypto_sort/void crypto_sort_uint32/g" diff --git a/usr.bin/ssh/srclimit.c b/usr.bin/ssh/srclimit.c index 8157ff028..bc747dc31 100644 --- a/usr.bin/ssh/srclimit.c +++ b/usr.bin/ssh/srclimit.c @@ -379,6 +379,10 @@ srclimit_penalise(struct xaddr *addr, int penalty_type) penalty_secs = penalty_cfg.penalty_noauth; reason = "penalty: connections without attempting authentication"; break; + case SRCLIMIT_PENALTY_REFUSECONNECTION: + penalty_secs = penalty_cfg.penalty_refuseconnection; + reason = "penalty: connection prohibited by RefuseConnection"; + break; case SRCLIMIT_PENALTY_GRACE_EXCEEDED: penalty_secs = penalty_cfg.penalty_crash; reason = "penalty: exceeded LoginGraceTime"; diff --git a/usr.bin/ssh/srclimit.h b/usr.bin/ssh/srclimit.h index 74a6f2b83..77d951ba6 100644 --- a/usr.bin/ssh/srclimit.h +++ b/usr.bin/ssh/srclimit.h 
@@ -22,16 +22,18 @@ void srclimit_init(int, int, int, int, int srclimit_check_allow(int, int); void srclimit_done(int); -#define SRCLIMIT_PENALTY_NONE 0 -#define SRCLIMIT_PENALTY_CRASH 1 -#define SRCLIMIT_PENALTY_AUTHFAIL 2 -#define SRCLIMIT_PENALTY_GRACE_EXCEEDED 3 -#define SRCLIMIT_PENALTY_NOAUTH 4 +#define SRCLIMIT_PENALTY_NONE 0 +#define SRCLIMIT_PENALTY_CRASH 1 +#define SRCLIMIT_PENALTY_AUTHFAIL 2 +#define SRCLIMIT_PENALTY_GRACE_EXCEEDED 3 +#define SRCLIMIT_PENALTY_NOAUTH 4 +#define SRCLIMIT_PENALTY_REFUSECONNECTION 5 /* meaningful exit values, used by sshd listener for penalties */ #define EXIT_LOGIN_GRACE 3 /* login grace period exceeded */ #define EXIT_CHILD_CRASH 4 /* preauth child crashed */ #define EXIT_AUTH_ATTEMPTED 5 /* at least one auth attempt made */ +#define EXIT_CONFIG_REFUSED 6 /* sshd_config RefuseConnection */ void srclimit_penalise(struct xaddr *, int); int srclimit_penalty_check_allow(int, const char **); diff --git a/usr.bin/ssh/ssh-keygen.c b/usr.bin/ssh/ssh-keygen.c index 71ad9a391..e384ccca6 100644 --- a/usr.bin/ssh/ssh-keygen.c +++ b/usr.bin/ssh/ssh-keygen.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ssh-keygen.c,v 1.474 2024/09/04 05:33:34 djm Exp $ */ +/* $OpenBSD: ssh-keygen.c,v 1.475 2024/09/15 00:47:01 djm Exp $ */ /* * Author: Tatu Ylonen * Copyright (c) 1994 Tatu Ylonen , Espoo, Finland @@ -300,7 +300,7 @@ ask_filename(struct passwd *pw, const char *prompt) static struct sshkey * load_identity(const char *filename, char **commentp) { - char *pass; + char *prompt, *pass; struct sshkey *prv; int r; @@ -312,8 +312,11 @@ load_identity(const char *filename, char **commentp) fatal_r(r, "Load key \"%s\"", filename); if (identity_passphrase) pass = xstrdup(identity_passphrase); - else - pass = read_passphrase("Enter passphrase: ", RP_ALLOW_STDIN); + else { + xasprintf(&prompt, "Enter passphrase for \"%s\": ", filename); + pass = read_passphrase(prompt, RP_ALLOW_STDIN); + free(prompt); + } r = sshkey_load_private(filename, pass, &prv, commentp); 
freezero(pass, strlen(pass)); if (r != 0) @@ -3110,17 +3113,22 @@ read_check_passphrase(const char *prompt1, const char *prompt2, } static char * -private_key_passphrase(void) +private_key_passphrase(const char *path) { + char *prompt, *ret; + if (identity_passphrase) return xstrdup(identity_passphrase); if (identity_new_passphrase) return xstrdup(identity_new_passphrase); - return read_check_passphrase( - "Enter passphrase (empty for no passphrase): ", + xasprintf(&prompt, "Enter passphrase for \"%s\" " + "(empty for no passphrase): ", path); + ret = read_check_passphrase(prompt, "Enter same passphrase again: ", "Passphrases do not match. Try again."); + free(prompt); + return ret; } static char * @@ -3216,7 +3224,7 @@ do_download_sk(const char *skprovider, const char *device) /* Save the key with the application string as the comment */ if (pass == NULL) - pass = private_key_passphrase(); + pass = private_key_passphrase(path); if ((r = sshkey_save_private(key, path, pass, key->sk_application, private_key_format, openssh_format_cipher, rounds)) != 0) { @@ -3912,7 +3920,7 @@ main(int argc, char **argv) exit(1); /* Determine the passphrase for the private key */ - passphrase = private_key_passphrase(); + passphrase = private_key_passphrase(identity_file); if (identity_comment) { strlcpy(comment, identity_comment, sizeof(comment)); } else { diff --git a/usr.bin/ssh/sshd.8 b/usr.bin/ssh/sshd.8 index 28c5fa66f..ec5a0af77 100644 --- a/usr.bin/ssh/sshd.8 +++ b/usr.bin/ssh/sshd.8 @@ -33,8 +33,8 @@ .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.\" -.\" $OpenBSD: sshd.8,v 1.326 2024/06/17 08:30:29 djm Exp $ -.Dd $Mdocdate: June 17 2024 $ +.\" $OpenBSD: sshd.8,v 1.327 2024/09/15 01:19:56 djm Exp $ +.Dd $Mdocdate: September 15 2024 $ .Dt SSHD 8 .Os .Sh NAME @@ -115,6 +115,10 @@ and .Dq rdomain and correspond to source address, user, resolved source host name, local address, local port number and routing domain respectively. +Additionally the +.Dq invalid-user +flag (which does not take a value argument) may be specified to simulate +a connection from an unrecognised username. .It Fl c Ar host_certificate_file Specifies a path to a certificate file to identify .Nm diff --git a/usr.bin/ssh/sshd.c b/usr.bin/ssh/sshd.c index 74b0fa16b..c02a7b964 100644 --- a/usr.bin/ssh/sshd.c +++ b/usr.bin/ssh/sshd.c @@ -1,4 +1,4 @@ -/* $OpenBSD: sshd.c,v 1.611 2024/09/12 00:36:27 djm Exp $ */ +/* $OpenBSD: sshd.c,v 1.612 2024/09/15 01:11:26 djm Exp $ */ /* * Copyright (c) 2000, 2001, 2002 Markus Friedl. All rights reserved. * Copyright (c) 2002 Niels Provos. All rights reserved. @@ -360,6 +360,13 @@ child_reap(struct early_child *child) (long)child->pid, child->id, child->early ? " (early)" : ""); break; + case EXIT_CONFIG_REFUSED: + penalty_type = SRCLIMIT_PENALTY_REFUSECONNECTION; + debug_f("preauth child %ld for %s prohibited by " + "RefuseConnection%s", + (long)child->pid, child->id, + child->early ? " (early)" : ""); + break; default: penalty_type = SRCLIMIT_PENALTY_NOAUTH; debug_f("preauth child %ld for %s exited " diff --git a/usr.bin/ssh/sshd_config.5 b/usr.bin/ssh/sshd_config.5 index 41675a123..eaf639fb0 100644 --- a/usr.bin/ssh/sshd_config.5 +++ b/usr.bin/ssh/sshd_config.5 @@ -33,8 +33,8 @@ .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.\" -.\" $OpenBSD: sshd_config.5,v 1.370 2024/09/09 14:41:21 naddy Exp $ -.Dd $Mdocdate: September 9 2024 $ +.\" $OpenBSD: sshd_config.5,v 1.374 2024/09/15 08:27:38 jmc Exp $ +.Dd $Mdocdate: September 15 2024 $ .Dt SSHD_CONFIG 5 .Os .Sh NAME @@ -1238,9 +1238,11 @@ applied. .Pp The arguments to .Cm Match -are one or more criteria-pattern pairs or the single token -.Cm All -which matches all criteria. +are one or more criteria-pattern pairs or one of the single token criteria: +.Cm All , +which matches all criteria, or +.Cm Invalid-User , +which matches when the requested user-name does not match any known account. The available criteria are .Cm User , .Cm Group , @@ -1324,6 +1326,7 @@ Available keywords are .Cm PubkeyAcceptedAlgorithms , .Cm PubkeyAuthentication , .Cm PubkeyAuthOptions , +.Cm RefuseConnection , .Cm RekeyLimit , .Cm RevokedKeys , .Cm RDomain , @@ -1597,6 +1600,11 @@ Specifies how long to refuse clients that cause a crash of .It Cm authfail:duration Specifies how long to refuse clients that disconnect after making one or more unsuccessful authentication attempts (default: 5s). +.It Cm refuseconnection:duration +Specifies how long to refuse clients that were administratively prohibited +connection via the +.Cm RefuseConnection +option (default: 10s). .It Cm noauth:duration Specifies how long to refuse clients that disconnect without attempting authentication (default: 1s). @@ -1754,6 +1762,18 @@ options have any effect for other, non-FIDO, public key types. Specifies whether public key authentication is allowed. The default is .Cm yes . +.It Cm RefuseConnection +Indicates that +.Xr sshd 8 +should unconditionally terminate the connection. +Additionally, a +.Cm refuseconnection +penalty may be recorded against the source of the connection if +.Cm PerSourcePenalties +are enabled. +This option is only really useful in a +.Cm Match +block. 
.It Cm RekeyLimit Specifies the maximum amount of data that may be transmitted or received before the session key is renegotiated, optionally followed by a maximum diff --git a/usr.bin/w/w.c b/usr.bin/w/w.c index d5d508331..1c794993a 100644 --- a/usr.bin/w/w.c +++ b/usr.bin/w/w.c @@ -1,4 +1,4 @@ -/* $OpenBSD: w.c,v 1.69 2024/08/19 07:28:22 florian Exp $ */ +/* $OpenBSD: w.c,v 1.70 2024/09/15 07:14:58 jsg Exp $ */ /*- * Copyright (c) 1980, 1991, 1993, 1994 @@ -107,7 +107,6 @@ main(int argc, char *argv[]) struct kinfo_proc *kp; struct stat *stp; FILE *ut; - struct in_addr addr; int ch, i, nentries, nusers, wcmd; char *memf, *nlistf, *p, *x; char buf[HOST_NAME_MAX+1], errbuf[_POSIX2_LINE_MAX]; diff --git a/usr.sbin/radiusctl/parser.c b/usr.sbin/radiusctl/parser.c index 5c7efbc44..26626941c 100644 --- a/usr.sbin/radiusctl/parser.c +++ b/usr.sbin/radiusctl/parser.c @@ -1,4 +1,4 @@ -/* $OpenBSD: parser.c,v 1.5 2024/09/02 04:45:22 yasuoka Exp $ */ +/* $OpenBSD: parser.c,v 1.6 2024/09/15 05:26:05 yasuoka Exp $ */ /* * Copyright (c) 2010 Reyk Floeter @@ -158,6 +158,7 @@ static const struct token t_ipcp[] = { { KEYWORD, "dump", IPCP_DUMP, t_ipcp_flags }, { KEYWORD, "monitor", IPCP_MONITOR, t_ipcp_flags }, { KEYWORD, "disconnect", IPCP_DISCONNECT,t_ipcp_session_seq }, + { KEYWORD, "delete", IPCP_DELETE, t_ipcp_session_seq }, { ENDTOKEN, "", NONE, NULL } }; diff --git a/usr.sbin/radiusctl/parser.h b/usr.sbin/radiusctl/parser.h index 6fefb0f47..fb3e88db1 100644 --- a/usr.sbin/radiusctl/parser.h +++ b/usr.sbin/radiusctl/parser.h @@ -1,4 +1,4 @@ -/* $OpenBSD: parser.h,v 1.4 2024/07/24 08:27:20 yasuoka Exp $ */ +/* $OpenBSD: parser.h,v 1.5 2024/09/15 05:26:05 yasuoka Exp $ */ /* This file is derived from OpenBSD:src/usr.sbin/ikectl/parser.h 1.9 */ /* @@ -29,6 +29,7 @@ enum actions { IPCP_SHOW, IPCP_DUMP, IPCP_MONITOR, + IPCP_DELETE, IPCP_DISCONNECT }; diff --git a/usr.sbin/radiusctl/radiusctl.8 b/usr.sbin/radiusctl/radiusctl.8 index 00ab5bce2..43bb87632 100644 --- 
a/usr.sbin/radiusctl/radiusctl.8 +++ b/usr.sbin/radiusctl/radiusctl.8 @@ -1,4 +1,4 @@ -.\" $OpenBSD: radiusctl.8,v 1.9 2024/07/24 08:27:20 yasuoka Exp $ +.\" $OpenBSD: radiusctl.8,v 1.10 2024/09/15 05:26:05 yasuoka Exp $ .\" .\" Copyright (c) YASUOKA Masahiko .\" @@ -15,7 +15,7 @@ .\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. .\" .\" -.Dd $Mdocdate: July 24 2024 $ +.Dd $Mdocdate: September 15 2024 $ .Dt RADIUSCTL 8 .Os .Sh NAME @@ -114,6 +114,10 @@ shows the sessions in JSON format. .It Cm ipcp disconnect Ar sequence Request to disconnect the session specified by the .Ar sequence . +.It Cm ipcp delete Ar sequence +Request to delete the session specified by the +.Ar sequence +without requesting disconnection. .El .Sh EXAMPLES .Bd -literal -offset indent diff --git a/usr.sbin/radiusctl/radiusctl.c b/usr.sbin/radiusctl/radiusctl.c index 6b8a4fedb..40fec84a2 100644 --- a/usr.sbin/radiusctl/radiusctl.c +++ b/usr.sbin/radiusctl/radiusctl.c @@ -1,4 +1,4 @@ -/* $OpenBSD: radiusctl.c,v 1.12 2024/07/24 08:27:20 yasuoka Exp $ */ +/* $OpenBSD: radiusctl.c,v 1.13 2024/09/15 05:26:05 yasuoka Exp $ */ /* * Copyright (c) 2015 YASUOKA Masahiko * @@ -170,6 +170,7 @@ main(int argc, char *argv[]) IMSG_RADIUSD_MODULE_IPCP_MONITOR : IMSG_RADIUSD_MODULE_IPCP_DUMP, 0, 0, -1, iov, niov); break; + case IPCP_DELETE: case IPCP_DISCONNECT: memset(module_name, 0, sizeof(module_name)); strlcpy(module_name, "ipcp", @@ -178,8 +179,10 @@ main(int argc, char *argv[]) iov[niov++].iov_len = RADIUSD_MODULE_NAME_LEN; iov[niov].iov_base = &res->session_seq; iov[niov++].iov_len = sizeof(res->session_seq); - imsg_composev(&ibuf, IMSG_RADIUSD_MODULE_IPCP_DISCONNECT, 0, 0, - -1, iov, niov); + imsg_composev(&ibuf, + (res->action == IPCP_DELETE) + ? 
IMSG_RADIUSD_MODULE_IPCP_DELETE + : IMSG_RADIUSD_MODULE_IPCP_DISCONNECT, 0, 0, -1, iov, niov); break; } while (ibuf.w.queued) { @@ -199,6 +202,7 @@ main(int argc, char *argv[]) case IPCP_SHOW: case IPCP_DUMP: case IPCP_MONITOR: + case IPCP_DELETE: case IPCP_DISCONNECT: done = ipcp_handle_imsg(res, &imsg, cnt++); break; diff --git a/usr.sbin/radiusd/eap2mschap_local.h b/usr.sbin/radiusd/eap2mschap_local.h index b3d523d92..7db1e20ab 100644 --- a/usr.sbin/radiusd/eap2mschap_local.h +++ b/usr.sbin/radiusd/eap2mschap_local.h @@ -1,4 +1,4 @@ -/* $OpenBSD: eap2mschap_local.h,v 1.2 2024/07/16 06:18:20 miod Exp $ */ +/* $OpenBSD: eap2mschap_local.h,v 1.3 2024/09/15 05:49:05 jsg Exp $ */ /* * Copyright (c) 2024 Internet Initiative Japan Inc. @@ -70,7 +70,7 @@ struct eap_mschap_challenge { uint8_t chall[16]; char chap_name[0]; } __packed; -#if defined(__STDC_VERSION__) && __STDC_VERSION >= 201112L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L static_assert(sizeof(struct eap_mschap_challenge) == 26, ""); static_assert(offsetof(struct eap_mschap_challenge, chap) == 5, ""); static_assert(offsetof(struct eap_mschap_challenge, chall) == 10, ""); @@ -87,7 +87,7 @@ struct eap_mschap_response { uint8_t flags; uint8_t chap_name[0]; } __packed; -#if defined(__STDC_VERSION__) && __STDC_VERSION >= 201112L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L static_assert(sizeof(struct eap_mschap_response) == 59, ""); static_assert(offsetof(struct eap_mschap_response, chap) == 5, ""); static_assert(offsetof(struct eap_mschap_response, peerchall) == 10, ""); diff --git a/usr.sbin/radiusd/radiusd_eap2mschap.c b/usr.sbin/radiusd/radiusd_eap2mschap.c index 236f7785c..53b6ccf3e 100644 --- a/usr.sbin/radiusd/radiusd_eap2mschap.c +++ b/usr.sbin/radiusd/radiusd_eap2mschap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: radiusd_eap2mschap.c,v 1.3 2024/08/16 09:52:16 yasuoka Exp $ */ +/* $OpenBSD: radiusd_eap2mschap.c,v 1.4 2024/09/15 05:31:23 yasuoka Exp $ */ /* * Copyright (c) 2024 
Internet Initiative Japan Inc. @@ -427,19 +427,18 @@ eap_recv(struct eap2mschap *self, u_int q_id, RADIUS_PACKET *pkt) goto fail; case EAP_TYPE_MSCHAPV2: if (msgsiz < offsetof(struct eap, value[1])) { - log_warnx( - "q=%u EAP state=%s Received message has wrong in " - "size for EAP-MS-CHAPV2: received length %zu " - "eap.length=%u", q_id, hex_string(state, statesiz, - buf2, sizeof(buf2)), msgsiz, ntohs(eap->length)); + log_warnx("q=%u EAP state=%s Received message has " + "wrong in size for EAP-MS-CHAPV2: received length " + "%zu eap.length=%u", q_id, + hex_string(state, statesiz, buf2, sizeof(buf2)), + msgsiz, ntohs(eap->length)); goto fail; } req = eap_recv_mschap(self, req, pkt, (struct eap_chap *)eap); break; default: - log_warnx( - "q=%u EAP state=%s EAP unknown type=%u receieved.", + log_warnx("q=%u EAP state=%s EAP unknown type=%u receieved.", q_id, hex_string(state, statesiz, buf2, sizeof(buf2)), eap->value[0]); goto fail; @@ -476,9 +475,8 @@ eap_recv_mschap(struct eap2mschap *self, struct access_req *req, htons(resp->chap.length) < sizeof(struct eap_mschap_response) - offsetof(struct eap_mschap_response, chap)) { - log_warnx( - "q=%u EAP state=%s Received EAP message has wrong " - "in size: received length %zu eap.length=%u " + log_warnx("q=%u EAP state=%s Received EAP message has " + "wrong in size: received length %zu eap.length=%u " "chap.length=%u valuesize=%u", req->q_id, hex_string(req->state, sizeof(req->state), buf, sizeof(buf)), eapsiz, ntohs(resp->eap.length), diff --git a/usr.sbin/radiusd/radiusd_ipcp.c b/usr.sbin/radiusd/radiusd_ipcp.c index 4f0200f39..58a42c7b8 100644 --- a/usr.sbin/radiusd/radiusd_ipcp.c +++ b/usr.sbin/radiusd/radiusd_ipcp.c @@ -1,4 +1,4 @@ -/* $OpenBSD: radiusd_ipcp.c,v 1.14 2024/08/27 06:06:14 florian Exp $ */ +/* $OpenBSD: radiusd_ipcp.c,v 1.17 2024/09/15 05:31:23 yasuoka Exp $ */ /* * Copyright (c) 2024 Internet Initiative Japan Inc. 
@@ -122,8 +122,10 @@ struct module_ipcp_dae { struct sockaddr_in6 sin6; } nas_addr; struct event ev_sock; + struct event ev_reqs; TAILQ_ENTRY(module_ipcp_dae) next; TAILQ_HEAD(, assigned_ipv4) reqs; + int ninflight; }; struct module_ipcp { @@ -178,6 +180,8 @@ struct assigned_ipv4 struct in_addr); static struct assigned_ipv4 *ipcp_ipv4_find(struct module_ipcp *, struct in_addr); +static void ipcp_ipv4_delete(struct module_ipcp *, + struct assigned_ipv4 *, const char *); static void ipcp_ipv4_release(struct module_ipcp *, struct assigned_ipv4 *); static int assigned_ipv4_compar(struct assigned_ipv4 *, @@ -198,6 +202,7 @@ static void ipcp_dae_send_disconnect_request(struct assigned_ipv4 *); static void ipcp_dae_request_on_timeout(int, short, void *); static void ipcp_dae_on_event(int, short, void *); static void ipcp_dae_reset_request(struct assigned_ipv4 *); +static void ipcp_dae_send_pending_requests(int, short, void *); static struct ipcp_address *parse_address_range(const char *); static const char @@ -303,18 +308,20 @@ ipcp_start(void *ctx) TAILQ_FOREACH(dae, &self->daes, next) { if ((sock = socket(dae->nas_addr.sin4.sin_family, SOCK_DGRAM, IPPROTO_UDP)) == -1) { - log_warn("could not start dae: %s", strerror(errno)); + log_warn("%s: could not start dae: socket()", __func__); return; } if (connect(sock, (struct sockaddr *)&dae->nas_addr, dae->nas_addr.sin4.sin_len) == -1) { - log_warn("could not start dae: %s", strerror(errno)); + log_warn("%s: could not start dae: connect()", + __func__); return; } dae->sock = sock; event_set(&dae->ev_sock, sock, EV_READ | EV_PERSIST, ipcp_dae_on_event, dae); event_add(&dae->ev_sock, NULL); + evtimer_set(&dae->ev_reqs, ipcp_dae_send_pending_requests, dae); } module_send_message(self->base, IMSG_OK, NULL); @@ -334,6 +341,8 @@ ipcp_stop(void *ctx) close(dae->sock); dae->sock = -1; } + if (evtimer_pending(&dae->ev_reqs, NULL)) + event_del(&dae->ev_reqs); } if (evtimer_pending(&self->ev_timer, NULL)) evtimer_del(&self->ev_timer); @@ 
-624,10 +633,14 @@ ipcp_dispatch_control(void *ctx, struct imsg *imsg) freezero(dump ,dumpsiz); break; case IMSG_RADIUSD_MODULE_IPCP_DISCONNECT: + case IMSG_RADIUSD_MODULE_IPCP_DELETE: if (datalen < sizeof(unsigned)) { log_warn("%s: received " - "IMSG_RADIUSD_MODULE_IPCP_DISCONNECT message size " - "is wrong", __func__); + "%s message size is wrong", __func__, + (imsg->hdr.type == + IMSG_RADIUSD_MODULE_IPCP_DISCONNECT) + ? "IMSG_RADIUSD_MODULE_IPCP_DISCONNECT" + : "IMSG_RADIUSD_MODULE_IPCP_DELETE"); goto fail; } seq = *(unsigned *)imsg->data; @@ -640,12 +653,19 @@ ipcp_dispatch_control(void *ctx, struct imsg *imsg) } if (assign == NULL) { cause = "session not found"; - log_warnx("Disconnect seq=%u requested, but the " - "session is not found", seq); + log_warnx("%s seq=%u requested, but the " + "session is not found", + (imsg->hdr.type == + IMSG_RADIUSD_MODULE_IPCP_DISCONNECT)? "Disconnect" + : "Delete", seq); module_imsg_compose(self->base, IMSG_NG, imsg->hdr.peerid, 0, -1, cause, strlen(cause) + 1); - } - else { + } else if (imsg->hdr.type == IMSG_RADIUSD_MODULE_IPCP_DELETE) { + log_info("Delete seq=%u by request", assign->seq); + ipcp_ipv4_delete(self, assign, "By control"); + module_imsg_compose(self->base, IMSG_OK, + imsg->hdr.peerid, 0, -1, NULL, 0); + } else { if (assign->dae == NULL) log_warnx("Disconnect seq=%u requested, but " "DAE is not configured", assign->seq); @@ -1059,10 +1079,12 @@ ipcp_accounting_request(void *ctx, u_int q_id, const u_char *pkt, !IN6_ARE_ADDR_EQUAL(&assign->nas_ipv6, &nas_ipv6) || strcmp(assign->nas_id, nas_id) != 0) continue; - log_info("Delete record for %s", inet_ntop(AF_INET, - &assign->ipv4, buf, sizeof(buf))); - ipcp_del_db(self, assign); - ipcp_ipv4_release(self, assign); + log_info("q=%u Delete record for %s", q_id, + inet_ntop(AF_INET, &assign->ipv4, buf, + sizeof(buf))); + ipcp_ipv4_delete(self, assign, + (type == RADIUS_ACCT_STATUS_TYPE_ACCT_ON) + ? 
"Receive Acct-On" : "Receive Acct-Off"); } return; } @@ -1144,9 +1166,9 @@ ipcp_accounting_request(void *ctx, u_int q_id, const u_char *pkt, if (ipcp_notice_startstop(self, assign, 1, NULL) != 0) goto fail; - log_info("Start seq=%u user=%s duration=%dsec session=%s " - "tunnel=%s from=%s auth=%s ip=%s", assign->seq, - assign->user->name, delay, assign->session_id, + log_info("q=%u Start seq=%u user=%s duration=%dsec " + "session=%s tunnel=%s from=%s auth=%s ip=%s", q_id, + assign->seq, assign->user->name, delay, assign->session_id, assign->tun_type, print_addr((struct sockaddr *) &assign->tun_client, buf1, sizeof(buf1)), assign->auth_method, inet_ntop(AF_INET, &addr4, buf, @@ -1180,10 +1202,10 @@ ipcp_accounting_request(void *ctx, u_int q_id, const u_char *pkt, strlcpy(stat.cause, radius_terminate_cause_string(uval), sizeof(stat.cause)); - log_info("Stop seq=%u user=%s duration=%lldsec session=%s " - "tunnel=%s from=%s auth=%s ip=%s datain=%"PRIu64"bytes,%" - PRIu32"packets dataout=%"PRIu64"bytes,%"PRIu32"packets " - "cause=\"%s\"", + log_info("q=%u Stop seq=%u user=%s duration=%lldsec " + "session=%s tunnel=%s from=%s auth=%s ip=%s " + "datain=%"PRIu64"bytes,%" PRIu32"packets dataout=%"PRIu64 + "bytes,%"PRIu32"packets cause=\"%s\"", q_id, assign->seq, assign->user->name, dur.tv_sec, assign->session_id, assign->tun_type, print_addr( (struct sockaddr *)&assign->tun_client, buf1, sizeof(buf1)), @@ -1254,6 +1276,20 @@ ipcp_ipv4_find(struct module_ipcp *self, struct in_addr ina) return (ret); } +void +ipcp_ipv4_delete(struct module_ipcp *self, struct assigned_ipv4 *assign, + const char *cause) +{ + static struct radiusd_ipcp_statistics stat = { 0 }; + + memset(stat.cause, 0, sizeof(stat.cause)); + strlcpy(stat.cause, cause, sizeof(stat.cause)); + + ipcp_del_db(self, assign); + ipcp_notice_startstop(self, assign, 0, &stat); + ipcp_ipv4_release(self, assign); +} + void ipcp_ipv4_release(struct module_ipcp *self, struct assigned_ipv4 *assign) { @@ -1567,22 +1603,27 @@ 
ipcp_dae_send_disconnect_request(struct assigned_ipv4 *assign) radius_set_accounting_request_authenticator(reqpkt, assign->dae->secret); assign->dae_reqpkt = reqpkt; + TAILQ_INSERT_TAIL(&assign->dae->reqs, assign, dae_next); } if (assign->dae_ntry == 0) { + if (assign->dae->ninflight >= RADIUSD_IPCP_DAE_MAX_INFLIGHT) + return; log_info("Sending Disconnect-Request seq=%u to %s", assign->seq, print_addr((struct sockaddr *) &assign->dae->nas_addr, buf, sizeof(buf))); - TAILQ_INSERT_TAIL(&assign->dae->reqs, assign, dae_next); } if (radius_send(assign->dae->sock, assign->dae_reqpkt, 0) < 0) log_warn("%s: sendto: %m", __func__); - tv.tv_sec = dae_request_timeouts[assign->dae_ntry++]; + tv.tv_sec = dae_request_timeouts[assign->dae_ntry]; tv.tv_usec = 0; evtimer_set(&assign->dae_evtimer, ipcp_dae_request_on_timeout, assign); evtimer_add(&assign->dae_evtimer, &tv); + if (assign->dae_ntry == 0) + assign->dae->ninflight++; + assign->dae_ntry++; } void @@ -1625,7 +1666,7 @@ ipcp_dae_on_event(int fd, short ev, void *ctx) if ((radres = radius_recv(dae->sock, 0)) == NULL) { if (errno == EAGAIN) return; - log_warn("Failed to receive from %s", print_addr( + log_warn("%s: Failed to receive from %s", __func__, print_addr( (struct sockaddr *)&dae->nas_addr, buf, sizeof(buf))); return; } @@ -1634,16 +1675,16 @@ ipcp_dae_on_event(int fd, short ev, void *ctx) break; } if (assign == NULL) { - log_warnx("Received RADIUS packet from %s has unknown id=%d", - print_addr((struct sockaddr *)&dae->nas_addr, buf, - sizeof(buf)), radius_get_id(radres)); + log_warnx("%s: Received RADIUS packet from %s has unknown " + "id=%d", __func__, print_addr((struct sockaddr *) + &dae->nas_addr, buf, sizeof(buf)), radius_get_id(radres)); goto out; } radius_set_request_packet(radres, assign->dae_reqpkt); if ((radius_check_response_authenticator(radres, dae->secret)) != 0) { - log_warnx("Received RADIUS packet for seq=%u from %s has a bad " - "authenticator", assign->seq, print_addr( + log_warnx("%s: Received 
RADIUS packet for seq=%u from %s has " + "a bad authenticator", __func__, assign->seq, print_addr( (struct sockaddr *)&dae->nas_addr, buf, sizeof(buf))); goto out; @@ -1667,13 +1708,13 @@ ipcp_dae_on_event(int fd, short ev, void *ctx) &dae->nas_addr, buf, sizeof(buf)), cause); break; case RADIUS_CODE_DISCONNECT_NAK: - log_warnx("Received Disconnect-NAK for seq=%u from %s%s", + log_info("Received Disconnect-NAK for seq=%u from %s%s", assign->seq, print_addr((struct sockaddr *) &dae->nas_addr, buf, sizeof(buf)), cause); break; default: - log_warn("Received unknown code=%d for id=%u from %s", - code, assign->seq, print_addr((struct sockaddr *) + log_warn("%s: Received unknown code=%d for id=%u from %s", + __func__, code, assign->seq, print_addr((struct sockaddr *) &dae->nas_addr, buf, sizeof(buf))); break; } @@ -1700,10 +1741,16 @@ void ipcp_dae_reset_request(struct assigned_ipv4 *assign) { struct radiusctl_client *client, *clientt; + const struct timeval zero = { 0, 0 }; if (assign->dae != NULL) { - if (assign->dae_ntry > 0) + if (assign->dae_reqpkt != NULL) TAILQ_REMOVE(&assign->dae->reqs, assign, dae_next); + if (assign->dae_ntry > 0) { + assign->dae->ninflight--; + if (!evtimer_pending(&assign->dae->ev_reqs, NULL)) + evtimer_add(&assign->dae->ev_reqs, &zero); + } } if (assign->dae_reqpkt != NULL) radius_delete_packet(assign->dae_reqpkt); @@ -1717,6 +1764,23 @@ ipcp_dae_reset_request(struct assigned_ipv4 *assign) assign->dae_ntry = 0; } +void +ipcp_dae_send_pending_requests(int fd, short ev, void *ctx) +{ + struct module_ipcp_dae *dae = ctx; + struct module_ipcp *self = dae->ipcp; + struct assigned_ipv4 *assign, *assignt; + + ipcp_update_time(self); + + TAILQ_FOREACH_SAFE(assign, &dae->reqs, dae_next, assignt) { + if (dae->ninflight >= RADIUSD_IPCP_DAE_MAX_INFLIGHT) + break; + if (assign->dae_ntry == 0) /* pending */ + ipcp_dae_send_disconnect_request(assign); + } +} + /*********************************************************************** * Miscellaneous 
functions ***********************************************************************/ diff --git a/usr.sbin/radiusd/radiusd_ipcp.h b/usr.sbin/radiusd/radiusd_ipcp.h index e86bbd09e..85b820aa2 100644 --- a/usr.sbin/radiusd/radiusd_ipcp.h +++ b/usr.sbin/radiusd/radiusd_ipcp.h @@ -1,4 +1,4 @@ -/* $OpenBSD: radiusd_ipcp.h,v 1.1 2024/07/09 17:26:14 yasuoka Exp $ */ +/* $OpenBSD: radiusd_ipcp.h,v 1.3 2024/09/15 05:29:11 yasuoka Exp $ */ /* * Copyright (c) 2024 Internet Initiative Japan Inc. @@ -24,12 +24,15 @@ #include "radiusd.h" +#define RADIUSD_IPCP_DAE_MAX_INFLIGHT 64 + enum imsg_module_ipcp_type { IMSG_RADIUSD_MODULE_IPCP_DUMP = IMSG_RADIUSD_MODULE_MIN, IMSG_RADIUSD_MODULE_IPCP_MONITOR, IMSG_RADIUSD_MODULE_IPCP_DUMP_AND_MONITOR, IMSG_RADIUSD_MODULE_IPCP_START, IMSG_RADIUSD_MODULE_IPCP_STOP, + IMSG_RADIUSD_MODULE_IPCP_DELETE, IMSG_RADIUSD_MODULE_IPCP_DISCONNECT }; diff --git a/usr.sbin/radiusd/radiusd_module.c b/usr.sbin/radiusd/radiusd_module.c index 02b383962..a666051e8 100644 --- a/usr.sbin/radiusd/radiusd_module.c +++ b/usr.sbin/radiusd/radiusd_module.c @@ -1,4 +1,4 @@ -/* $OpenBSD: radiusd_module.c,v 1.19 2024/07/14 15:27:57 yasuoka Exp $ */ +/* $OpenBSD: radiusd_module.c,v 1.20 2024/09/15 05:14:32 yasuoka Exp $ */ /* * Copyright (c) 2015 YASUOKA Masahiko @@ -643,9 +643,13 @@ module_on_event(int fd, short evmask, void *ctx) if (ret > 0) continue; base->writeready = false; - if (ret == 0 && errno == EAGAIN) + if (ret == -1 && errno == EAGAIN) break; - syslog(LOG_ERR, "%s: msgbuf_write: %m", __func__); + if (ret == 0) + syslog(LOG_ERR, "%s: connection is closed", __func__); + else + syslog(LOG_ERR, "%s: msgbuf_write: %d %m", __func__, + ret); module_stop(base); return; }