sync with OpenBSD -current
parent 20629a8b0d
commit 604988d5d3
58 changed files with 592 additions and 377 deletions

@@ -1,4 +1,4 @@
-/* $OpenBSD: print.c,v 1.87 2024/01/16 19:08:37 deraadt Exp $ */
+/* $OpenBSD: print.c,v 1.88 2024/01/28 19:05:33 deraadt Exp $ */
 /* $NetBSD: print.c,v 1.27 1995/09/29 21:58:12 cgd Exp $ */
 
 /*-
@@ -303,10 +303,6 @@ printstate(const struct pinfo *pi, VARENT *ve)
 		*cp++ = '+';
 	if (kp->p_psflags & PS_PLEDGE)
 		*cp++ = 'p';
-	if (kp->p_psflags & PS_PIN)
-		*cp++ = 'l';
-	if (kp->p_psflags & PS_LIBCPIN)
-		*cp++ = 'L';
 	if (kp->p_eflag & EPROC_UNVEIL) {
 		if (kp->p_eflag & EPROC_LKUNVEIL)
 			*cp++ = 'U';

@ -1,4 +1,4 @@
|
|||
.\" $OpenBSD: ps.1,v 1.132 2024/01/16 19:08:37 deraadt Exp $
|
||||
.\" $OpenBSD: ps.1,v 1.133 2024/01/28 19:05:33 deraadt Exp $
|
||||
.\" $NetBSD: ps.1,v 1.16 1996/03/21 01:36:28 jtc Exp $
|
||||
.\"
|
||||
.\" Copyright (c) 1980, 1990, 1991, 1993, 1994
|
||||
|
@ -30,7 +30,7 @@
|
|||
.\"
|
||||
.\" @(#)ps.1 8.3 (Berkeley) 4/18/94
|
||||
.\"
|
||||
.Dd $Mdocdate: January 16 2024 $
|
||||
.Dd $Mdocdate: January 28 2024 $
|
||||
.Dt PS 1
|
||||
.Os
|
||||
.Sh NAME
|
||||
|
@ -478,11 +478,6 @@ scheduling priority.
|
|||
.It p
|
||||
The process has called
|
||||
.Xr pledge 2 .
|
||||
.It l
|
||||
.Xr ld.so 1
|
||||
or a static executable has syscall pinning.
|
||||
.It L
|
||||
libc.so has syscall pinning.
|
||||
.\" .It S
|
||||
.\" The process has asked for FIFO
|
||||
.\" page replacement
|
||||
|
|
|
@@ -1,4 +1,4 @@
-/* $OpenBSD: asn1_item.c,v 1.19 2024/01/13 13:59:18 joshua Exp $ */
+/* $OpenBSD: asn1_item.c,v 1.20 2024/01/28 14:43:48 joshua Exp $ */
 /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
  * All rights reserved.
  *
@@ -381,7 +381,7 @@ int
 ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
     ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey)
 {
-	EVP_MD_CTX ctx;
+	EVP_MD_CTX *md_ctx = NULL;
 	unsigned char *in = NULL;
 	int mdnid, pknid;
 	int in_len = 0;
@@ -389,15 +389,16 @@ ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
 
 	if (pkey == NULL) {
 		ASN1error(ERR_R_PASSED_NULL_PARAMETER);
-		return -1;
+		goto err;
 	}
 
 	if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7) {
 		ASN1error(ASN1_R_INVALID_BIT_STRING_BITS_LEFT);
-		return -1;
+		goto err;
 	}
 
-	EVP_MD_CTX_init(&ctx);
+	if ((md_ctx = EVP_MD_CTX_new()) == NULL)
+		goto err;
 
 	/* Convert signature OID into digest and public key OIDs */
 	if (!OBJ_find_sigid_algs(OBJ_obj2nid(a->algorithm), &mdnid, &pknid)) {
@@ -409,7 +410,7 @@ ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
 		ASN1error(ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM);
 		goto err;
 	}
-	ret = pkey->ameth->item_verify(&ctx, it, asn, a,
+	ret = pkey->ameth->item_verify(md_ctx, it, asn, a,
 	    signature, pkey);
 	/* Return value of 2 means carry on, anything else means we
 	 * exit straight away: either a fatal error of the underlying
@@ -432,7 +433,7 @@ ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
 		goto err;
 	}
 
-	if (!EVP_DigestVerifyInit(&ctx, NULL, type, NULL, pkey)) {
+	if (!EVP_DigestVerifyInit(md_ctx, NULL, type, NULL, pkey)) {
 		ASN1error(ERR_R_EVP_LIB);
 		ret = 0;
 		goto err;
@@ -446,7 +447,7 @@ ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
 		goto err;
 	}
 
-	if (EVP_DigestVerify(&ctx, signature->data, signature->length,
+	if (EVP_DigestVerify(md_ctx, signature->data, signature->length,
 	    in, in_len) <= 0) {
 		ASN1error(ERR_R_EVP_LIB);
 		ret = 0;
@@ -456,7 +457,7 @@ ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
 	ret = 1;
 
  err:
-	EVP_MD_CTX_cleanup(&ctx);
+	EVP_MD_CTX_free(md_ctx);
 	freezero(in, in_len);
 
 	return ret;

@@ -1,4 +1,4 @@
-/* $OpenBSD: cmac.c,v 1.18 2023/12/18 21:15:00 tb Exp $ */
+/* $OpenBSD: cmac.c,v 1.20 2024/01/28 20:57:15 tb Exp $ */
 /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
  * project.
  */
@@ -68,7 +68,7 @@
  * The temporary block tbl is a scratch buffer that holds intermediate secrets.
  */
 struct CMAC_CTX_st {
-	EVP_CIPHER_CTX cctx;
+	EVP_CIPHER_CTX *cipher_ctx;
 	unsigned char k1[EVP_MAX_BLOCK_LENGTH];
 	unsigned char k2[EVP_MAX_BLOCK_LENGTH];
 	unsigned char tbl[EVP_MAX_BLOCK_LENGTH];
@@ -112,19 +112,27 @@ CMAC_CTX_new(void)
 {
 	CMAC_CTX *ctx;
 
-	ctx = malloc(sizeof(CMAC_CTX));
-	if (!ctx)
-		return NULL;
-	EVP_CIPHER_CTX_init(&ctx->cctx);
+	if ((ctx = calloc(1, sizeof(CMAC_CTX))) == NULL)
+		goto err;
+	if ((ctx->cipher_ctx = EVP_CIPHER_CTX_new()) == NULL)
+		goto err;
+
 	ctx->nlast_block = -1;
+
 	return ctx;
+
+ err:
+	CMAC_CTX_free(ctx);
+
+	return NULL;
 }
 LCRYPTO_ALIAS(CMAC_CTX_new);
 
 void
 CMAC_CTX_cleanup(CMAC_CTX *ctx)
 {
-	EVP_CIPHER_CTX_cleanup(&ctx->cctx);
+	if (ctx->cipher_ctx != NULL)
+		EVP_CIPHER_CTX_reset(ctx->cipher_ctx);
 	explicit_bzero(ctx->tbl, EVP_MAX_BLOCK_LENGTH);
 	explicit_bzero(ctx->k1, EVP_MAX_BLOCK_LENGTH);
 	explicit_bzero(ctx->k2, EVP_MAX_BLOCK_LENGTH);
@@ -136,7 +144,7 @@ LCRYPTO_ALIAS(CMAC_CTX_cleanup);
 EVP_CIPHER_CTX *
 CMAC_CTX_get0_cipher_ctx(CMAC_CTX *ctx)
 {
-	return &ctx->cctx;
+	return ctx->cipher_ctx;
 }
 LCRYPTO_ALIAS(CMAC_CTX_get0_cipher_ctx);
 
@@ -147,7 +155,8 @@ CMAC_CTX_free(CMAC_CTX *ctx)
 		return;
 
 	CMAC_CTX_cleanup(ctx);
-	free(ctx);
+	EVP_CIPHER_CTX_free(ctx->cipher_ctx);
+	freezero(ctx, sizeof(CMAC_CTX));
 }
 LCRYPTO_ALIAS(CMAC_CTX_free);
 
@@ -158,9 +167,9 @@ CMAC_CTX_copy(CMAC_CTX *out, const CMAC_CTX *in)
 
 	if (in->nlast_block == -1)
 		return 0;
-	if (!EVP_CIPHER_CTX_copy(&out->cctx, &in->cctx))
+	if (!EVP_CIPHER_CTX_copy(out->cipher_ctx, in->cipher_ctx))
 		return 0;
-	block_size = EVP_CIPHER_CTX_block_size(&in->cctx);
+	block_size = EVP_CIPHER_CTX_block_size(in->cipher_ctx);
 	memcpy(out->k1, in->k1, block_size);
 	memcpy(out->k2, in->k2, block_size);
 	memcpy(out->tbl, in->tbl, block_size);
@@ -182,7 +191,7 @@ CMAC_Init(CMAC_CTX *ctx, const void *key, size_t keylen,
 		/* Not initialised */
 		if (ctx->nlast_block == -1)
 			return 0;
-		if (!EVP_EncryptInit_ex(&ctx->cctx, NULL, NULL, NULL, zero_iv))
+		if (!EVP_EncryptInit_ex(ctx->cipher_ctx, NULL, NULL, NULL, zero_iv))
 			return 0;
 		explicit_bzero(ctx->tbl, sizeof(ctx->tbl));
 		ctx->nlast_block = 0;
@@ -198,17 +207,17 @@ CMAC_Init(CMAC_CTX *ctx, const void *key, size_t keylen,
 		 */
 		if ((cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0)
 			return 0;
-		if (!EVP_EncryptInit_ex(&ctx->cctx, cipher, NULL, NULL, NULL))
+		if (!EVP_EncryptInit_ex(ctx->cipher_ctx, cipher, NULL, NULL, NULL))
 			return 0;
 	}
 
 	/* Non-NULL key means initialisation is complete. */
 	if (key != NULL) {
-		if (EVP_CIPHER_CTX_cipher(&ctx->cctx) == NULL)
+		if (EVP_CIPHER_CTX_cipher(ctx->cipher_ctx) == NULL)
 			return 0;
 
 		/* make_kn() only supports block sizes of 8 and 16 bytes. */
-		block_size = EVP_CIPHER_CTX_block_size(&ctx->cctx);
+		block_size = EVP_CIPHER_CTX_block_size(ctx->cipher_ctx);
 		if (block_size != 8 && block_size != 16)
 			return 0;
 
@@ -216,11 +225,11 @@ CMAC_Init(CMAC_CTX *ctx, const void *key, size_t keylen,
 		 * Section 6.1, step 1: store the intermediate secret CIPH_K(0)
 		 * in ctx->tbl.
 		 */
-		if (!EVP_CIPHER_CTX_set_key_length(&ctx->cctx, keylen))
+		if (!EVP_CIPHER_CTX_set_key_length(ctx->cipher_ctx, keylen))
 			return 0;
-		if (!EVP_EncryptInit_ex(&ctx->cctx, NULL, NULL, key, zero_iv))
+		if (!EVP_EncryptInit_ex(ctx->cipher_ctx, NULL, NULL, key, zero_iv))
 			return 0;
-		if (!EVP_Cipher(&ctx->cctx, ctx->tbl, zero_iv, block_size))
+		if (!EVP_Cipher(ctx->cipher_ctx, ctx->tbl, zero_iv, block_size))
 			return 0;
 
 		/* Section 6.1, step 2: compute k1 from intermediate secret. */
@@ -233,7 +242,7 @@ CMAC_Init(CMAC_CTX *ctx, const void *key, size_t keylen,
 		ctx->nlast_block = 0;
 
 		/* Reset context again to get ready for the first data block. */
-		if (!EVP_EncryptInit_ex(&ctx->cctx, NULL, NULL, NULL, zero_iv))
+		if (!EVP_EncryptInit_ex(ctx->cipher_ctx, NULL, NULL, NULL, zero_iv))
 			return 0;
 	}
 
@@ -251,7 +260,7 @@ CMAC_Update(CMAC_CTX *ctx, const void *in, size_t dlen)
 		return 0;
 	if (dlen == 0)
 		return 1;
-	block_size = EVP_CIPHER_CTX_block_size(&ctx->cctx);
+	block_size = EVP_CIPHER_CTX_block_size(ctx->cipher_ctx);
 	/* Copy into partial block if we need to */
 	if (ctx->nlast_block > 0) {
 		size_t nleft;
@@ -267,13 +276,13 @@ CMAC_Update(CMAC_CTX *ctx, const void *in, size_t dlen)
 			return 1;
 		data += nleft;
 		/* Else not final block so encrypt it */
-		if (!EVP_Cipher(&ctx->cctx, ctx->tbl, ctx->last_block,
+		if (!EVP_Cipher(ctx->cipher_ctx, ctx->tbl, ctx->last_block,
 		    block_size))
 			return 0;
 	}
 	/* Encrypt all but one of the complete blocks left */
 	while (dlen > block_size) {
-		if (!EVP_Cipher(&ctx->cctx, ctx->tbl, data, block_size))
+		if (!EVP_Cipher(ctx->cipher_ctx, ctx->tbl, data, block_size))
 			return 0;
 		dlen -= block_size;
 		data += block_size;
@@ -292,7 +301,7 @@ CMAC_Final(CMAC_CTX *ctx, unsigned char *out, size_t *poutlen)
 
 	if (ctx->nlast_block == -1)
 		return 0;
-	block_size = EVP_CIPHER_CTX_block_size(&ctx->cctx);
+	block_size = EVP_CIPHER_CTX_block_size(ctx->cipher_ctx);
 	*poutlen = (size_t)block_size;
 	if (!out)
 		return 1;
@@ -308,7 +317,7 @@ CMAC_Final(CMAC_CTX *ctx, unsigned char *out, size_t *poutlen)
 		for (i = 0; i < block_size; i++)
 			out[i] = ctx->last_block[i] ^ ctx->k2[i];
 	}
-	if (!EVP_Cipher(&ctx->cctx, out, out, block_size)) {
+	if (!EVP_Cipher(ctx->cipher_ctx, out, out, block_size)) {
 		explicit_bzero(out, block_size);
 		return 0;
 	}
@@ -327,6 +336,6 @@ CMAC_resume(CMAC_CTX *ctx)
 	 * So reinitialising using the last decrypted block will allow
 	 * CMAC to continue after calling CMAC_Final().
 	 */
-	return EVP_EncryptInit_ex(&ctx->cctx, NULL, NULL, NULL, ctx->tbl);
+	return EVP_EncryptInit_ex(ctx->cipher_ctx, NULL, NULL, NULL, ctx->tbl);
 }
 LCRYPTO_ALIAS(CMAC_resume);

@@ -1,4 +1,4 @@
-/* $OpenBSD: conf_lib.c,v 1.15 2017/01/29 17:49:22 beck Exp $ */
+/* $OpenBSD: conf_lib.c,v 1.16 2024/01/28 21:00:54 tb Exp $ */
 /* Written by Richard Levitte (richard@levitte.org) for the OpenSSL
  * project 2000.
  */
@@ -86,8 +86,8 @@ CONF_set_default_method(CONF_METHOD *meth)
 	return 1;
 }
 
-LHASH_OF(CONF_VALUE) *CONF_load(LHASH_OF(CONF_VALUE) *conf, const char *file,
-    long *eline)
+LHASH_OF(CONF_VALUE) *
+CONF_load(LHASH_OF(CONF_VALUE) *conf, const char *file, long *eline)
 {
 	LHASH_OF(CONF_VALUE) *ltmp;
 	BIO *in = NULL;
@@ -104,8 +104,8 @@ LHASH_OF(CONF_VALUE) *CONF_load(LHASH_OF(CONF_VALUE) *conf, const char *file,
 	return ltmp;
 }
 
-LHASH_OF(CONF_VALUE) *CONF_load_fp(LHASH_OF(CONF_VALUE) *conf, FILE *fp,
-    long *eline)
+LHASH_OF(CONF_VALUE) *
+CONF_load_fp(LHASH_OF(CONF_VALUE) *conf, FILE *fp, long *eline)
 {
 	BIO *btmp;
 	LHASH_OF(CONF_VALUE) *ltmp;
@@ -119,8 +119,8 @@ LHASH_OF(CONF_VALUE) *CONF_load_fp(LHASH_OF(CONF_VALUE) *conf, FILE *fp,
 	return ltmp;
 }
 
-LHASH_OF(CONF_VALUE) *CONF_load_bio(LHASH_OF(CONF_VALUE) *conf, BIO *bp,
-    long *eline)
+LHASH_OF(CONF_VALUE) *
+CONF_load_bio(LHASH_OF(CONF_VALUE) *conf, BIO *bp, long *eline)
 {
 	CONF ctmp;
 	int ret;
@@ -133,8 +133,8 @@ LHASH_OF(CONF_VALUE) *CONF_load_bio(LHASH_OF(CONF_VALUE) *conf, BIO *bp,
 	return NULL;
 }
 
-STACK_OF(CONF_VALUE) *CONF_get_section(LHASH_OF(CONF_VALUE) *conf,
-    const char *section)
+STACK_OF(CONF_VALUE) *
+CONF_get_section(LHASH_OF(CONF_VALUE) *conf, const char *section)
 {
 	if (conf == NULL) {
 		return NULL;

@@ -69,8 +69,6 @@ Notes for some targets:
 - zlib doesn't work on HP-UX 9.05 with some versions of /bin/cc. It works with
   other compilers. Use "make test" to check your compiler.
 
-- gzdopen is not supported on RISCOS or BEOS.
-
 - For PalmOs, see http://palmzlib.sourceforge.net/
 
@@ -1667,10 +1667,10 @@ local block_state deflate_stored(deflate_state *s, int flush) {
         _tr_stored_block(s, (char *)0, 0L, last);
 
         /* Replace the lengths in the dummy stored block with len. */
-        s->pending_buf[s->pending - 4] = len;
-        s->pending_buf[s->pending - 3] = len >> 8;
-        s->pending_buf[s->pending - 2] = ~len;
-        s->pending_buf[s->pending - 1] = ~len >> 8;
+        s->pending_buf[s->pending - 4] = (Bytef)len;
+        s->pending_buf[s->pending - 3] = (Bytef)(len >> 8);
+        s->pending_buf[s->pending - 2] = (Bytef)~len;
+        s->pending_buf[s->pending - 1] = (Bytef)(~len >> 8);
 
         /* Write the stored block header bytes. */
         flush_pending(s->strm);

@@ -722,7 +722,7 @@ local void scan_tree(deflate_state *s, ct_data *tree, int max_code) {
         if (++count < max_count && curlen == nextlen) {
             continue;
         } else if (count < min_count) {
-            s->bl_tree[curlen].Freq += count;
+            s->bl_tree[curlen].Freq += (ush)count;
         } else if (curlen != 0) {
             if (curlen != prevlen) s->bl_tree[curlen].Freq++;
             s->bl_tree[REP_3_6].Freq++;

@@ -143,7 +143,7 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
 #  define OS_CODE 7
 #endif
 
-#ifdef __acorn
+#if defined(__acorn) || defined(__riscos)
 #  define OS_CODE 13
 #endif
 

@@ -1,5 +1,5 @@
 #!/bin/ksh
-# $OpenBSD: ixp.sh,v 1.1 2023/10/12 09:18:56 claudio Exp $
+# $OpenBSD: ixp.sh,v 1.2 2024/01/28 12:36:21 anton Exp $
 
 set -e
 
@@ -43,6 +43,8 @@ if [ "$(id -u)" -ne 0 ]; then
 	exit 1
 fi
 
+. "${BGPDCONFIGDIR}/util.sh"
+
 trap 'error_notify $?' EXIT
 
 echo check if rdomains are busy
@@ -84,17 +86,15 @@ route -T ${RDOMAIN2} exec ${BGPD} \
 route -T ${RDOMAIN2} exec ${BGPD} \
 	-v -f ${BGPDCONFIGDIR}/bgpd.ixp.rdomain2_4.conf
 
-sleep 3
-
+wait_until "route -T ${RDOMAIN1} exec bgpctl show rib detail | ! cmp /dev/null -"
 route -T ${RDOMAIN1} exec bgpctl show rib detail | grep -v 'Last update:' | \
 	tee ixp.rdomain1.out
-sleep .2
 diff -u ${BGPDCONFIGDIR}/ixp.rdomain1.ok ixp.rdomain1.out
 echo OK
 
+wait_until "route -T ${RDOMAIN2} exec bgpctl show rib detail | ! cmp /dev/null -"
 route -T ${RDOMAIN2} exec bgpctl show rib detail | grep -v 'Last update:' | \
 	tee ixp.rdomain2.out
-sleep .2
 diff -u ${BGPDCONFIGDIR}/ixp.rdomain2.ok ixp.rdomain2.out
 echo OK
 

@@ -1,5 +1,5 @@
 #!/bin/ksh
-# $OpenBSD: network_statement.sh,v 1.8 2023/02/15 14:19:08 claudio Exp $
+# $OpenBSD: network_statement.sh,v 1.9 2024/01/28 12:36:21 anton Exp $
 
 set -e
 
@@ -34,7 +34,6 @@ error_notify() {
 	route -qn -T ${RDOMAIN2} flush || true
 	ifconfig lo${RDOMAIN1} destroy || true
 	ifconfig lo${RDOMAIN2} destroy || true
-	rm -f ${TMP}
 	if [ $1 -ne 0 ]; then
 		echo FAILED
 		exit 1
@@ -43,27 +42,14 @@ error_notify() {
 	fi
 }
 
-wait_until() {
-	local _i=0
-
-	cat >"$TMP"
-	while [ "$_i" -lt 8 ]; do
-		sh -x "$TMP" && return 0
-		sleep 0.5
-		_i="$((_i + 1))"
-	done
-	echo timeout
-	return 1
-}
-
 if [ "$(id -u)" -ne 0 ]; then
 	echo need root privileges >&2
 	exit 1
 fi
 
-trap 'error_notify $?' EXIT
+. "${BGPDCONFIGDIR}/util.sh"
 
 TMP="$(mktemp -t bgpd.XXXXXX)"
+trap 'error_notify $?' EXIT
 
 echo check if rdomains are busy
 for n in ${RDOMAINS}; do
@@ -104,9 +90,7 @@ sleep 1
 route -T ${RDOMAIN1} exec bgpctl nei RDOMAIN2 up
 sleep 1
 
-wait_until <<EOF
-route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | grep -q ${PAIR2STATIC}
-EOF
+wait_until "route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | grep -q ${PAIR2STATIC}"
 
 echo test 1
 route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | \
@@ -128,9 +112,7 @@ route -T ${RDOMAIN2} delete -label PAIR2RTABLE ${PAIR2RTABLE} \
 route -T ${RDOMAIN2} delete -priority 55 ${PAIR2PRIORITY} \
 	${PAIR1IP}
 
-wait_until <<EOF
-route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | ! grep -q ${PAIR2STATIC}
-EOF
+wait_until "route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | ! grep -q ${PAIR2STATIC}"
 
 echo test 2
 route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | \
@@ -150,9 +132,7 @@ route -T ${RDOMAIN2} add -label PAIR2RTABLE ${PAIR2RTABLE} \
 route -T ${RDOMAIN2} add -priority 55 ${PAIR2PRIORITY} \
 	${PAIR1IP}
 
-wait_until <<EOF
-route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | grep -q ${PAIR2STATIC}
-EOF
+wait_until "route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | grep -q ${PAIR2STATIC}"
 
 echo test 3
 route -T ${RDOMAIN1} exec bgpctl sh rib ${PAIR2STATIC} | \

regress/usr.sbin/bgpd/integrationtests/util.sh (new file, 11 lines)
@@ -0,0 +1,11 @@
+wait_until() {
+	local _i=0
+
+	while [ "$_i" -lt 8 ]; do
+		sh -x -c "$*" && return 0
+		sleep 0.5
+		_i="$((_i + 1))"
+	done
+	echo timeout
+	return 1
+}

@@ -1,4 +1,4 @@
-/* $OpenBSD: ipsecctl.c,v 1.86 2023/10/09 15:32:14 tobhe Exp $ */
+/* $OpenBSD: ipsecctl.c,v 1.87 2024/01/29 00:59:54 yasuoka Exp $ */
 /*
  * Copyright (c) 2004, 2005 Hans-Joerg Hoexer <hshoexer@openbsd.org>
  *
@@ -706,6 +706,10 @@ ipsecctl_show(int opts)
 		}
 	}
 
+	/* open /etc/{services,protocols} before pledge(2) */
+	setservent(1);
+	setprotoent(1);
+
 	if (pledge("stdio", NULL) == -1)
 		err(1, "pledge");
 
@@ -781,6 +785,10 @@ ipsecctl_show(int opts)
 			ipsecctl_print_title("SAD:");
 			printf("No entries\n");
 		}
+
+	/* close /etc/{services,protocols} */
+	endservent();
+	endprotoent();
 }
 
 int

sys/dev/ic/qwx.c (340 changed lines)
@ -1,4 +1,4 @@
|
|||
/* $OpenBSD: qwx.c,v 1.8 2024/01/25 17:00:20 stsp Exp $ */
|
||||
/* $OpenBSD: qwx.c,v 1.9 2024/01/28 22:30:39 stsp Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
|
||||
|
@ -134,6 +134,8 @@ int qwx_qmi_event_server_arrive(struct qwx_softc *);
|
|||
int qwx_mac_register(struct qwx_softc *);
|
||||
int qwx_mac_start(struct qwx_softc *);
|
||||
void qwx_mac_scan_finish(struct qwx_softc *);
|
||||
int qwx_mac_mgmt_tx_wmi(struct qwx_softc *, struct qwx_vif *, uint8_t,
|
||||
struct mbuf *);
|
||||
int qwx_dp_tx_send_reo_cmd(struct qwx_softc *, struct dp_rx_tid *,
|
||||
enum hal_reo_cmd_type , struct ath11k_hal_reo_cmd *,
|
||||
void (*func)(struct qwx_dp *, void *, enum hal_reo_cmd_status));
|
||||
|
@ -343,13 +345,86 @@ qwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
|
|||
return err;
|
||||
}
|
||||
|
||||
int
|
||||
qwx_tx(struct qwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
|
||||
{
|
||||
struct ieee80211_frame *wh;
|
||||
struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
|
||||
uint8_t pdev_id = 0; /* TODO: derive pdev ID somehow? */
|
||||
uint8_t frame_type;
|
||||
|
||||
wh = mtod(m, struct ieee80211_frame *);
|
||||
frame_type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
|
||||
|
||||
if (frame_type == IEEE80211_FC0_TYPE_MGT)
|
||||
return qwx_mac_mgmt_tx_wmi(sc, arvif, pdev_id, m);
|
||||
|
||||
printf("%s: not implemented\n", sc->sc_dev.dv_xname);
|
||||
m_freem(m);
|
||||
return ENOTSUP;
|
||||
}
|
||||
|
||||
void
|
||||
qwx_start(struct ifnet *ifp)
|
||||
{
|
||||
struct qwx_softc *sc = ifp->if_softc;
|
||||
struct ieee80211com *ic = &sc->sc_ic;
|
||||
struct ieee80211_node *ni;
|
||||
struct ether_header *eh;
|
||||
struct mbuf *m;
|
||||
|
||||
if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
|
||||
return;
|
||||
|
||||
printf("%s: not implemented\n", __func__);
|
||||
for (;;) {
|
||||
/* why isn't this done per-queue? */
|
||||
if (sc->qfullmsk != 0) {
|
||||
ifq_set_oactive(&ifp->if_snd);
|
||||
break;
|
||||
}
|
||||
|
||||
/* need to send management frames even if we're not RUNning */
|
||||
m = mq_dequeue(&ic->ic_mgtq);
|
||||
if (m) {
|
||||
ni = m->m_pkthdr.ph_cookie;
|
||||
goto sendit;
|
||||
}
|
||||
|
||||
if (ic->ic_state != IEEE80211_S_RUN ||
|
||||
(ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
|
||||
break;
|
||||
|
||||
m = ifq_dequeue(&ifp->if_snd);
|
||||
if (!m)
|
||||
break;
|
||||
if (m->m_len < sizeof (*eh) &&
|
||||
(m = m_pullup(m, sizeof (*eh))) == NULL) {
|
||||
ifp->if_oerrors++;
|
||||
continue;
|
||||
}
|
||||
#if NBPFILTER > 0
|
||||
if (ifp->if_bpf != NULL)
|
||||
bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
|
||||
#endif
|
||||
if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
|
||||
ifp->if_oerrors++;
|
||||
continue;
|
||||
}
|
||||
|
||||
sendit:
|
||||
#if NBPFILTER > 0
|
||||
if (ic->ic_rawbpf != NULL)
|
||||
bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
|
||||
#endif
|
||||
if (qwx_tx(sc, m, ni) != 0) {
|
||||
ieee80211_release_node(ic, ni);
|
||||
ifp->if_oerrors++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ifp->if_flags & IFF_UP)
|
||||
ifp->if_timer = 1;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -11646,6 +11721,91 @@ exit:
|
|||
#endif
|
||||
}
|
||||
|
||||
int
|
||||
qwx_pull_mgmt_tx_compl_param_tlv(struct qwx_softc *sc, struct mbuf *m,
|
||||
struct wmi_mgmt_tx_compl_event *param)
|
||||
{
|
||||
const void **tb;
|
||||
const struct wmi_mgmt_tx_compl_event *ev;
|
||||
int ret = 0;
|
||||
|
||||
tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len);
|
||||
if (tb == NULL) {
|
||||
ret = ENOMEM;
|
||||
printf("%s: failed to parse tlv: %d\n",
|
||||
sc->sc_dev.dv_xname, ret);
|
||||
return ENOMEM;
|
||||
}
|
||||
|
||||
ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
|
||||
if (!ev) {
|
||||
printf("%s: failed to fetch mgmt tx compl ev\n",
|
||||
sc->sc_dev.dv_xname);
|
||||
free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
|
||||
return EPROTO;
|
||||
}
|
||||
|
||||
param->pdev_id = ev->pdev_id;
|
||||
param->desc_id = ev->desc_id;
|
||||
param->status = ev->status;
|
||||
param->ack_rssi = ev->ack_rssi;
|
||||
|
||||
free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb));
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
qwx_wmi_process_mgmt_tx_comp(struct qwx_softc *sc,
|
||||
struct wmi_mgmt_tx_compl_event *tx_compl_param)
|
||||
{
|
||||
struct ieee80211com *ic = &sc->sc_ic;
|
||||
struct qwx_vif *arvif = TAILQ_FIRST(&sc->vif_list); /* XXX */
|
||||
struct ifnet *ifp = &ic->ic_if;
|
||||
struct qwx_tx_data *tx_data;
|
||||
|
||||
if (tx_compl_param->desc_id >= nitems(arvif->txmgmt.data)) {
|
||||
printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
|
||||
sc->sc_dev.dv_xname, tx_compl_param->desc_id);
|
||||
return;
|
||||
}
|
||||
|
||||
tx_data = &arvif->txmgmt.data[tx_compl_param->desc_id];
|
||||
if (tx_data->m == NULL) {
|
||||
printf("%s: received mgmt tx compl for invalid buf_id: %d\n",
|
||||
sc->sc_dev.dv_xname, tx_compl_param->desc_id);
|
||||
return;
|
||||
}
|
||||
|
||||
bus_dmamap_unload(sc->sc_dmat, tx_data->map);
|
||||
m_freem(tx_data->m);
|
||||
tx_data->m = NULL;
|
||||
|
||||
if (arvif->txmgmt.queued > 0)
|
||||
arvif->txmgmt.queued--;
|
||||
|
||||
if (tx_compl_param->status != 0)
|
||||
ifp->if_oerrors++;
|
||||
}
|
||||
|
||||
void
|
||||
qwx_mgmt_tx_compl_event(struct qwx_softc *sc, struct mbuf *m)
|
||||
{
|
||||
struct wmi_mgmt_tx_compl_event tx_compl_param = { 0 };
|
||||
|
||||
if (qwx_pull_mgmt_tx_compl_param_tlv(sc, m, &tx_compl_param) != 0) {
|
||||
printf("%s: failed to extract mgmt tx compl event\n",
|
||||
sc->sc_dev.dv_xname);
|
||||
return;
|
||||
}
|
||||
|
||||
qwx_wmi_process_mgmt_tx_comp(sc, &tx_compl_param);
|
||||
|
||||
DNPRINTF(QWX_D_MGMT, "%s: event mgmt tx compl ev pdev_id %d, "
|
||||
"desc_id %d, status %d ack_rssi %d", __func__,
|
||||
tx_compl_param.pdev_id, tx_compl_param.desc_id,
|
||||
tx_compl_param.status, tx_compl_param.ack_rssi);
|
||||
}
|
||||
|
||||
void
|
||||
qwx_wmi_tlv_op_rx(struct qwx_softc *sc, struct mbuf *m)
|
||||
{
|
||||
|
@ -11696,11 +11856,9 @@ qwx_wmi_tlv_op_rx(struct qwx_softc *sc, struct mbuf *m)
|
|||
qwx_mgmt_rx_event(sc, m);
|
||||
/* mgmt_rx_event() owns the skb now! */
|
||||
return;
|
||||
#if 0
|
||||
case WMI_MGMT_TX_COMPLETION_EVENTID:
|
||||
ath11k_mgmt_tx_compl_event(ab, skb);
|
||||
qwx_mgmt_tx_compl_event(sc, m);
|
||||
break;
|
||||
#endif
|
||||
case WMI_SCAN_EVENTID:
|
||||
DPRINTF("%s: 0x%x: scan event\n", __func__, id);
|
||||
qwx_scan_event(sc, m);
|
||||
|
@ -15249,6 +15407,66 @@ qwx_wmi_set_sta_ps_param(struct qwx_softc *sc, uint32_t vdev_id,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
qwx_wmi_mgmt_send(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id,
|
||||
uint32_t buf_id, struct mbuf *frame, struct qwx_tx_data *tx_data)
|
||||
{
|
||||
struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id];
|
||||
struct wmi_mgmt_send_cmd *cmd;
|
||||
struct wmi_tlv *frame_tlv;
|
||||
struct mbuf *m;
|
||||
uint32_t buf_len;
|
||||
int ret, len;
|
||||
uint64_t paddr;
|
||||
|
||||
paddr = tx_data->map->dm_segs[0].ds_addr;
|
||||
|
||||
buf_len = frame->m_pkthdr.len < WMI_MGMT_SEND_DOWNLD_LEN ?
|
||||
frame->m_pkthdr.len : WMI_MGMT_SEND_DOWNLD_LEN;
|
||||
|
||||
len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
|
||||
|
||||
m = qwx_wmi_alloc_mbuf(len);
|
||||
if (!m)
|
||||
return ENOMEM;
|
||||
|
||||
cmd = (struct wmi_mgmt_send_cmd *)(mtod(m, uint8_t *) +
|
||||
sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr));
|
||||
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
|
||||
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
|
||||
cmd->vdev_id = arvif->vdev_id;
|
||||
cmd->desc_id = buf_id;
|
||||
cmd->chanfreq = 0;
|
||||
cmd->paddr_lo = paddr & 0xffffffff;
|
||||
cmd->paddr_hi = paddr >> 32;
|
||||
cmd->frame_len = frame->m_pkthdr.len;
|
||||
cmd->buf_len = buf_len;
|
||||
cmd->tx_params_valid = 0;
|
||||
|
||||
frame_tlv = (struct wmi_tlv *)(mtod(m, uint8_t *) +
|
||||
sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr) +
|
||||
sizeof(*cmd));
|
||||
frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
|
||||
FIELD_PREP(WMI_TLV_LEN, buf_len);
|
||||
|
||||
memcpy(frame_tlv->value, mtod(frame, void *), buf_len);
|
||||
#if 0 /* Not needed on OpenBSD? */
|
||||
ath11k_ce_byte_swap(frame_tlv->value, buf_len);
|
||||
#endif
|
||||
ret = qwx_wmi_cmd_send(wmi, m, WMI_MGMT_TX_SEND_CMDID);
|
||||
if (ret) {
|
||||
printf("%s: failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n",
|
||||
sc->sc_dev.dv_xname);
|
||||
m_freem(m);
|
||||
return ret;
|
||||
}
|
||||
|
||||
DNPRINTF(QWX_D_WMI, "%s: cmd mgmt tx send", __func__);
|
||||
|
||||
tx_data->m = frame;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
qwx_wmi_vdev_create(struct qwx_softc *sc, uint8_t *macaddr,
|
||||
struct vdev_create_params *param)
|
||||
|
@ -18710,6 +18928,61 @@ qwx_mac_vdev_start(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id)
|
|||
return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 0);
|
||||
}
|
||||
|
||||
void
|
||||
qwx_vif_free(struct qwx_softc *sc, struct qwx_vif *arvif)
|
||||
{
|
||||
struct qwx_txmgmt_queue *txmgmt;
|
||||
int i;
|
||||
|
||||
if (arvif == NULL)
|
||||
return;
|
||||
|
||||
txmgmt = &arvif->txmgmt;
|
||||
for (i = 0; i < nitems(txmgmt->data); i++) {
|
||||
struct qwx_tx_data *tx_data = &txmgmt->data[i];
|
||||
|
||||
if (tx_data->m) {
|
||||
m_freem(tx_data->m);
|
||||
tx_data->m = NULL;
|
||||
}
|
||||
if (tx_data->map) {
|
||||
bus_dmamap_destroy(sc->sc_dmat, tx_data->map);
|
||||
tx_data->map = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
free(arvif, M_DEVBUF, sizeof(*arvif));
|
||||
}
|
||||
|
||||
struct qwx_vif *
|
||||
qwx_vif_alloc(struct qwx_softc *sc)
|
||||
{
|
||||
struct qwx_vif *arvif;
|
||||
struct qwx_txmgmt_queue *txmgmt;
|
||||
int i, ret = 0;
|
||||
const bus_size_t size = IEEE80211_MAX_LEN;
|
||||
|
||||
arvif = malloc(sizeof(*arvif), M_DEVBUF, M_NOWAIT | M_ZERO);
|
||||
if (arvif == NULL)
|
||||
return NULL;
|
||||
|
||||
txmgmt = &arvif->txmgmt;
|
||||
for (i = 0; i < nitems(txmgmt->data); i++) {
|
||||
struct qwx_tx_data *tx_data = &txmgmt->data[i];
|
||||
|
||||
ret = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
|
||||
BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &tx_data->map);
|
||||
if (ret) {
|
||||
qwx_vif_free(sc, arvif);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
arvif->sc = sc;
|
||||
|
||||
return arvif;
|
||||
}
|
||||
|
||||
int
|
||||
qwx_mac_op_add_interface(struct qwx_pdev *pdev)
|
||||
{
|
||||
|
@ -18746,13 +19019,11 @@ qwx_mac_op_add_interface(struct qwx_pdev *pdev)
|
|||
goto err;
|
||||
}
|
||||
|
||||
arvif = malloc(sizeof(*arvif), M_DEVBUF, M_NOWAIT | M_ZERO);
|
||||
arvif = qwx_vif_alloc(sc);
|
||||
if (arvif == NULL) {
|
||||
ret = ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
arvif->sc = sc;
|
||||
#if 0
|
||||
INIT_DELAYED_WORK(&arvif->connection_loss_work,
|
||||
ath11k_mac_vif_sta_connection_loss_work);
|
||||
|
@ -18983,7 +19254,7 @@ err:
|
|||
#ifdef notyet
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
#endif
|
||||
free(arvif, M_DEVBUF, sizeof(*arvif));
|
||||
qwx_vif_free(sc, arvif);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -19907,6 +20178,57 @@ free_peer:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
qwx_mac_mgmt_tx_wmi(struct qwx_softc *sc, struct qwx_vif *arvif,
|
||||
uint8_t pdev_id, struct mbuf *m)
|
||||
{
|
||||
struct qwx_txmgmt_queue *txmgmt = &arvif->txmgmt;
|
||||
struct qwx_tx_data *tx_data;
|
||||
int buf_id;
|
||||
int ret;
|
||||
|
||||
buf_id = txmgmt->cur;
|
||||
|
||||
DNPRINTF(QWX_D_MAC, "%s: tx mgmt frame, buf id %d\n", __func__, buf_id);
|
||||
|
||||
if (txmgmt->queued >= nitems(txmgmt->data))
|
||||
return ENOSPC;
|
||||
|
||||
tx_data = &txmgmt->data[buf_id];
|
||||
#if 0
|
||||
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
|
||||
if ((ieee80211_is_action(hdr->frame_control) ||
|
||||
ieee80211_is_deauth(hdr->frame_control) ||
|
||||
ieee80211_is_disassoc(hdr->frame_control)) &&
|
||||
ieee80211_has_protected(hdr->frame_control)) {
|
||||
skb_put(skb, IEEE80211_CCMP_MIC_LEN);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
ret = bus_dmamap_load_mbuf(sc->sc_dmat, tx_data->map,
|
||||
m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
|
||||
if (ret) {
|
||||
printf("%s: failed to map mgmt Tx buffer: %d\n",
|
||||
sc->sc_dev.dv_xname, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = qwx_wmi_mgmt_send(sc, arvif, pdev_id, buf_id, m, tx_data);
|
||||
if (ret) {
|
||||
printf("%s: failed to send mgmt frame: %d\n",
|
||||
sc->sc_dev.dv_xname, ret);
|
||||
goto err_unmap_buf;
|
||||
}
|
||||
|
||||
txmgmt->cur = (txmgmt->cur + 1) % nitems(txmgmt->data);
|
||||
txmgmt->queued++;
|
||||
return 0;
|
||||
|
||||
err_unmap_buf:
|
||||
bus_dmamap_unload(sc->sc_dmat, tx_data->map);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
qwx_wmi_start_scan_init(struct qwx_softc *sc, struct scan_req_params *arg)
|
||||
{
|
||||
|
|
|
@@ -1,4 +1,4 @@
-/* $OpenBSD: qwxreg.h,v 1.2 2024/01/25 10:11:04 stsp Exp $ */
+/* $OpenBSD: qwxreg.h,v 1.3 2024/01/28 22:30:39 stsp Exp $ */
 
 /*
  * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc.
@@ -3857,12 +3857,6 @@ struct wmi_scan_prob_req_oui_cmd {
 #define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
 #define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
 
-struct wmi_mgmt_send_params {
-	uint32_t tlv_header;
-	uint32_t tx_params_dword0;
-	uint32_t tx_params_dword1;
-};
-
 struct wmi_mgmt_send_cmd {
 	uint32_t tlv_header;
 	uint32_t vdev_id;
@@ -3874,9 +3868,13 @@ struct wmi_mgmt_send_cmd {
 	uint32_t buf_len;
 	uint32_t tx_params_valid;
 
-	/* This TLV is followed by struct wmi_mgmt_frame */
-
-	/* Followed by struct wmi_mgmt_send_params */
+	/*
+	 * Followed by struct wmi_tlv and buf_len bytes of frame data with
+	 * buf_len <= WMI_MGMT_SEND_DOWNLD_LEN, which may be exceeded by
+	 * frame_len. The full frame is mapped at paddr_lo/hi.
+	 * Presumably the idea is that small frames can skip the extra DMA
+	 * transfer of frame data after the command has been transferred.
+	 */
 } __packed;
 
 struct wmi_sta_powersave_mode_cmd {

@@ -1,4 +1,4 @@
-/* $OpenBSD: qwxvar.h,v 1.5 2024/01/25 17:00:21 stsp Exp $ */
+/* $OpenBSD: qwxvar.h,v 1.6 2024/01/28 22:30:39 stsp Exp $ */
 
 /*
  * Copyright (c) 2018-2019 The Linux Foundation.
@@ -718,7 +718,7 @@ struct qwx_tx_data {
 	uint8_t eid;
 	uint8_t flags;
 	uint32_t cipher;
-} __packed;
+};
 
 struct qwx_ce_ring {
 	/* Number of entries in this ring; must be power of 2 */
@@ -1468,6 +1468,12 @@ struct qwx_pdev_dp {
 	struct qwx_mon_data mon_data;
 };
 
+struct qwx_txmgmt_queue {
+	struct qwx_tx_data data[8];
+	int cur;
+	int queued;
+};
+
 struct qwx_vif {
 	uint32_t vdev_id;
 	enum wmi_vdev_type vdev_type;
@@ -1528,6 +1534,8 @@ struct qwx_vif {
 #ifdef CONFIG_ATH11K_DEBUGFS
 	struct dentry *debugfs_twt;
 #endif /* CONFIG_ATH11K_DEBUGFS */
+
+	struct qwx_txmgmt_queue txmgmt;
 };
 
 TAILQ_HEAD(qwx_vif_list, qwx_vif);
@@ -1591,6 +1599,7 @@ struct qwx_softc {
 	int have_firmware;
 
 	int sc_tx_timer;
+	uint32_t qfullmsk;
 
 	bus_addr_t mem;
 	struct ath11k_hw_params hw_params;

@ -755,7 +755,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
|
|||
int r;
|
||||
|
||||
if (!adev->smc_rreg)
|
||||
return -EPERM;
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (size & 0x3 || *pos & 0x3)
|
||||
return -EINVAL;
|
||||
|
@ -814,7 +814,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
|
|||
int r;
|
||||
|
||||
if (!adev->smc_wreg)
|
||||
return -EPERM;
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (size & 0x3 || *pos & 0x3)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -1026,8 +1026,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
if (amdgpu_dpm_read_sensor(adev,
|
||||
AMDGPU_PP_SENSOR_GPU_AVG_POWER,
|
||||
(void *)&ui32, &ui32_size)) {
|
||||
/* fall back to input power for backwards compat */
|
||||
if (amdgpu_dpm_read_sensor(adev,
|
||||
AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
|
||||
(void *)&ui32, &ui32_size)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
ui32 >>= 8;
|
||||
break;
|
||||
case AMDGPU_INFO_SENSOR_VDDNB:
|
||||
|
|
|
@ -330,12 +330,6 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
|
|||
pdd->gpuvm_limit =
|
||||
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
|
||||
|
||||
/* dGPUs: the reserved space for kernel
|
||||
* before SVM
|
||||
*/
|
||||
pdd->qpd.cwsr_base = SVM_CWSR_BASE;
|
||||
pdd->qpd.ib_base = SVM_IB_BASE;
|
||||
|
||||
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
|
||||
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
|
||||
}
|
||||
|
@ -345,18 +339,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
|
|||
pdd->lds_base = MAKE_LDS_APP_BASE_V9();
|
||||
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
|
||||
|
||||
pdd->gpuvm_base = PAGE_SIZE;
|
||||
/* Raven needs SVM to support graphic handle, etc. Leave the small
|
||||
* reserved space before SVM on Raven as well, even though we don't
|
||||
* have to.
|
||||
* Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
|
||||
* are used in Thunk to reserve SVM.
|
||||
*/
|
||||
pdd->gpuvm_base = SVM_USER_BASE;
|
||||
pdd->gpuvm_limit =
|
||||
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
|
||||
|
||||
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
|
||||
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
|
||||
|
||||
/*
|
||||
* Place TBA/TMA on opposite side of VM hole to prevent
|
||||
* stray faults from triggering SVM on these pages.
|
||||
*/
|
||||
pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
|
||||
}
|
||||
|
||||
int kfd_init_apertures(struct kfd_process *process)
|
||||
|
@ -413,6 +407,12 @@ int kfd_init_apertures(struct kfd_process *process)
|
|||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/* dGPUs: the reserved space for kernel
|
||||
* before SVM
|
||||
*/
|
||||
pdd->qpd.cwsr_base = SVM_CWSR_BASE;
|
||||
pdd->qpd.ib_base = SVM_IB_BASE;
|
||||
}
|
||||
|
||||
dev_dbg(kfd_device, "node id %u\n", id);
|
||||
|
|
|
@ -1021,7 +1021,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
|
|||
} else {
|
||||
res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
|
||||
if (IS_ERR(res))
|
||||
return -ENOMEM;
|
||||
return PTR_ERR(res);
|
||||
pgmap->range.start = res->start;
|
||||
pgmap->range.end = res->end;
|
||||
pgmap->type = MEMORY_DEVICE_PRIVATE;
|
||||
|
@ -1037,10 +1037,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
|
|||
r = devm_memremap_pages(adev->dev, pgmap);
|
||||
if (IS_ERR(r)) {
|
||||
pr_err("failed to register HMM device memory\n");
|
||||
/* Disable SVM support capability */
|
||||
pgmap->type = 0;
|
||||
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
|
||||
devm_release_mem_region(adev->dev, res->start, resource_size(res));
|
||||
/* Disable SVM support capability */
|
||||
pgmap->type = 0;
|
||||
return PTR_ERR(r);
|
||||
}
|
||||
|
||||
|
|
|
@ -971,7 +971,7 @@ struct kfd_process {
|
|||
struct work_struct debug_event_workarea;
|
||||
|
||||
/* Tracks debug per-vmid request for debug flags */
|
||||
bool dbg_flags;
|
||||
u32 dbg_flags;
|
||||
|
||||
atomic_t poison;
|
||||
/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
|
||||
|
|
|
@ -1342,10 +1342,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
|
|||
num_cpu++;
|
||||
}
|
||||
|
||||
if (list_empty(&kdev->io_link_props))
|
||||
return -ENODATA;
|
||||
|
||||
gpu_link = list_first_entry(&kdev->io_link_props,
|
||||
struct kfd_iolink_properties, list);
|
||||
if (!gpu_link)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < num_cpu; i++) {
|
||||
/* CPU <--> GPU */
|
||||
|
@ -1423,15 +1424,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
|
|||
peer->gpu->adev))
|
||||
return ret;
|
||||
|
||||
if (list_empty(&kdev->io_link_props))
|
||||
return -ENODATA;
|
||||
|
||||
iolink1 = list_first_entry(&kdev->io_link_props,
|
||||
struct kfd_iolink_properties, list);
|
||||
if (!iolink1)
|
||||
return -ENOMEM;
|
||||
|
||||
if (list_empty(&peer->io_link_props))
|
||||
return -ENODATA;
|
||||
|
||||
iolink2 = list_first_entry(&peer->io_link_props,
|
||||
struct kfd_iolink_properties, list);
|
||||
if (!iolink2)
|
||||
return -ENOMEM;
|
||||
|
||||
props = kfd_alloc_struct(props);
|
||||
if (!props)
|
||||
|
|
|
@ -6886,7 +6886,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
|
|||
max_bpc);
|
||||
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
|
||||
clock = adjusted_mode->clock;
|
||||
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
|
||||
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
|
||||
}
|
||||
|
||||
dm_new_connector_state->vcpi_slots =
|
||||
|
|
|
@ -1636,7 +1636,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
|
|||
} else {
|
||||
/* check if mode could be supported within full_pbn */
|
||||
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
|
||||
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
|
||||
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
|
||||
if (pbn > full_pbn)
|
||||
return DC_FAIL_BANDWIDTH_VALIDATE;
|
||||
}
|
||||
|
|
|
@ -807,7 +807,7 @@ void dp_decide_lane_settings(
|
|||
const struct link_training_settings *lt_settings,
|
||||
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
|
||||
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
|
||||
union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
|
||||
union dpcd_training_lane *dpcd_lane_settings)
|
||||
{
|
||||
uint32_t lane;
|
||||
|
||||
|
|
|
@ -111,7 +111,7 @@ void dp_decide_lane_settings(
|
|||
const struct link_training_settings *lt_settings,
|
||||
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
|
||||
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
|
||||
union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
|
||||
union dpcd_training_lane *dpcd_lane_settings);
|
||||
|
||||
enum dc_dp_training_pattern decide_cr_training_pattern(
|
||||
const struct dc_link_settings *link_settings);
|
||||
|
|
|
@ -2735,10 +2735,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
|
|||
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
|
||||
&non_clock_info_array->nonClockInfo[non_clock_array_index];
|
||||
ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
|
||||
if (ps == NULL) {
|
||||
kfree(adev->pm.dpm.ps);
|
||||
if (ps == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
adev->pm.dpm.ps[i].ps_priv = ps;
|
||||
k = 0;
|
||||
idx = (u8 *)&power_state->v2.clockInfoIndex[0];
|
||||
|
|
|
@ -7379,10 +7379,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
|
|||
kcalloc(4,
|
||||
sizeof(struct amdgpu_clock_voltage_dependency_entry),
|
||||
GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
|
||||
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
|
||||
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
|
||||
|
|
|
@ -272,44 +272,36 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
|
||||
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
|
||||
dep_table);
|
||||
if (ret) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
|
||||
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
|
||||
(mode_info->atom_context->bios + data_offset +
|
||||
le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
|
||||
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
|
||||
dep_table);
|
||||
if (ret) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
|
||||
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
|
||||
(mode_info->atom_context->bios + data_offset +
|
||||
le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
|
||||
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
|
||||
dep_table);
|
||||
if (ret) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
|
||||
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
|
||||
(mode_info->atom_context->bios + data_offset +
|
||||
le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
|
||||
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
|
||||
dep_table);
|
||||
if (ret) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
|
||||
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
|
||||
(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
|
||||
|
@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
kcalloc(psl->ucNumEntries,
|
||||
sizeof(struct amdgpu_phase_shedding_limits_entry),
|
||||
GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
entry = &psl->entries[0];
|
||||
for (i = 0; i < psl->ucNumEntries; i++) {
|
||||
|
@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
ATOM_PPLIB_CAC_Leakage_Record *entry;
|
||||
u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
|
||||
adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
|
||||
return -ENOMEM;
|
||||
}
|
||||
entry = &cac_table->entries[0];
|
||||
for (i = 0; i < cac_table->ucNumEntries; i++) {
|
||||
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
|
||||
|
@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
|
||||
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
|
||||
kzalloc(size, GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
|
||||
return -ENOMEM;
|
||||
}
|
||||
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
|
||||
limits->numEntries;
|
||||
entry = &limits->entries[0];
|
||||
|
@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
|
||||
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
|
||||
kzalloc(size, GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
|
||||
return -ENOMEM;
|
||||
}
|
||||
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
|
||||
limits->numEntries;
|
||||
entry = &limits->entries[0];
|
||||
|
@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
sizeof(struct amdgpu_clock_voltage_dependency_entry);
|
||||
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
|
||||
kzalloc(size, GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
|
||||
return -ENOMEM;
|
||||
}
|
||||
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
|
||||
limits->numEntries;
|
||||
entry = &limits->entries[0];
|
||||
|
@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
le16_to_cpu(ext_hdr->usPPMTableOffset));
|
||||
adev->pm.dpm.dyn_state.ppm_table =
|
||||
kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.ppm_table) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.ppm_table)
|
||||
return -ENOMEM;
|
||||
}
|
||||
adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
|
||||
adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
|
||||
le16_to_cpu(ppm->usCpuCoreNumber);
|
||||
|
@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
sizeof(struct amdgpu_clock_voltage_dependency_entry);
|
||||
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
|
||||
kzalloc(size, GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
|
||||
return -ENOMEM;
|
||||
}
|
||||
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
|
||||
limits->numEntries;
|
||||
entry = &limits->entries[0];
|
||||
|
@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
ATOM_PowerTune_Table *pt;
|
||||
adev->pm.dpm.dyn_state.cac_tdp_table =
|
||||
kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
|
||||
if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
|
||||
amdgpu_free_extended_power_table(adev);
|
||||
if (!adev->pm.dpm.dyn_state.cac_tdp_table)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (rev > 0) {
|
||||
ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
|
||||
(mode_info->atom_context->bios + data_offset +
|
||||
|
@ -645,12 +621,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
|
|||
ret = amdgpu_parse_clk_voltage_dep_table(
|
||||
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
|
||||
dep_table);
|
||||
if (ret) {
|
||||
kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -2974,6 +2974,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
|||
result = smu7_get_evv_voltages(hwmgr);
|
||||
if (result) {
|
||||
pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
|
||||
kfree(hwmgr->backend);
|
||||
hwmgr->backend = NULL;
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
|
@ -3019,8 +3021,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
|||
}
|
||||
|
||||
result = smu7_update_edc_leakage_table(hwmgr);
|
||||
if (result)
|
||||
if (result) {
|
||||
smu7_hwmgr_backend_fini(hwmgr);
|
||||
return result;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -4700,13 +4700,12 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
|
|||
|
||||
/**
|
||||
* drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
|
||||
* @clock: dot clock for the mode
|
||||
* @bpp: bpp for the mode.
|
||||
* @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
|
||||
* @clock: dot clock
|
||||
* @bpp: bpp as .4 binary fixed point
|
||||
*
|
||||
* This uses the formula in the spec to calculate the PBN value for a mode.
|
||||
*/
|
||||
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
|
||||
int drm_dp_calc_pbn_mode(int clock, int bpp)
|
||||
{
|
||||
/*
|
||||
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
|
||||
|
@ -4717,18 +4716,9 @@ int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
|
|||
* peak_kbps *= (1006/1000)
|
||||
* peak_kbps *= (64/54)
|
||||
* peak_kbps *= 8 convert to bytes
|
||||
*
|
||||
* If the bpp is in units of 1/16, further divide by 16. Put this
|
||||
* factor in the numerator rather than the denominator to avoid
|
||||
* integer overflow
|
||||
*/
|
||||
|
||||
if (dsc)
|
||||
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
|
||||
8 * 54 * 1000 * 1000);
|
||||
|
||||
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
|
||||
8 * 54 * 1000 * 1000);
|
||||
return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 >> 4),
|
||||
1000 * 8 * 54 * 1000);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
|
||||
|
||||
|
|
|
@ -1042,8 +1042,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
|
|||
goto err_minors;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
drm_modeset_register_all(dev);
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = drm_modeset_register_all(dev);
|
||||
if (ret)
|
||||
goto err_unload;
|
||||
}
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
|
||||
driver->name, driver->major, driver->minor,
|
||||
|
@ -1053,6 +1056,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
|
|||
|
||||
goto out_unlock;
|
||||
|
||||
err_unload:
|
||||
if (dev->driver->unload)
|
||||
dev->driver->unload(dev);
|
||||
err_minors:
|
||||
remove_compat_control_link(dev);
|
||||
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
|
||||
|
|
|
@ -109,8 +109,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
|
|||
continue;
|
||||
|
||||
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
|
||||
dsc ? bpp << 4 : bpp,
|
||||
dsc);
|
||||
bpp << 4);
|
||||
|
||||
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
|
||||
connector->port,
|
||||
|
@ -941,7 +940,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
|
|||
return ret;
|
||||
|
||||
if (mode_rate > max_rate || mode->clock > max_dotclk ||
|
||||
drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
|
||||
drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
|
||||
*status = MODE_CLOCK_HIGH;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -832,7 +832,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
|
|||
int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
|
||||
int link_rate, int link_lane_count);
|
||||
|
||||
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
|
||||
int drm_dp_calc_pbn_mode(int clock, int bpp);
|
||||
|
||||
void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
|
||||
|
||||
|
|
|
@@ -192,7 +192,7 @@ struct drm_bridge_funcs {
	 * or &drm_encoder_helper_funcs.dpms hook.
	 *
	 * The bridge must assume that the display pipe (i.e. clocks and timing
-	 * singals) feeding it is no longer running when this callback is
+	 * signals) feeding it is no longer running when this callback is
	 * called.
	 *
	 * The @post_disable callback is optional.

@ -2321,7 +2321,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
|
|||
switch (prim_walk) {
|
||||
case 1:
|
||||
for (i = 0; i < track->num_arrays; i++) {
|
||||
size = track->arrays[i].esize * track->max_indx * 4;
|
||||
size = track->arrays[i].esize * track->max_indx * 4UL;
|
||||
if (track->arrays[i].robj == NULL) {
|
||||
DRM_ERROR("(PW %u) Vertex array %u no buffer "
|
||||
"bound\n", prim_walk, i);
|
||||
|
@ -2340,7 +2340,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
|
|||
break;
|
||||
case 2:
|
||||
for (i = 0; i < track->num_arrays; i++) {
|
||||
size = track->arrays[i].esize * (nverts - 1) * 4;
|
||||
size = track->arrays[i].esize * (nverts - 1) * 4UL;
|
||||
if (track->arrays[i].robj == NULL) {
|
||||
DRM_ERROR("(PW %u) Vertex array %u no buffer "
|
||||
"bound\n", prim_walk, i);
|
||||
|
|
|
@ -1275,7 +1275,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 4;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;

@ -1302,7 +1302,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = radeon_get_ib_value(p, idx) << 8;
track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
track->htile_bo = reloc->robj;
track->db_dirty = true;

@ -687,11 +687,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
if (radeon_crtc == NULL)
return;

radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
if (!radeon_crtc->flip_queue) {
kfree(radeon_crtc);
return;
}

drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
rdev->mode_info.crtcs[index] = radeon_crtc;

if (rdev->family >= CHIP_BONAIRE) {

@ -1204,13 +1204,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &vm->page_directory);
if (r)
if (r) {
kfree(vm->page_tables);
vm->page_tables = NULL;
return r;

}
r = radeon_vm_clear_bo(rdev, vm->page_directory);
if (r) {
radeon_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
kfree(vm->page_tables);
vm->page_tables = NULL;
return r;
}

@ -3611,6 +3611,10 @@ static int si_cp_start(struct radeon_device *rdev)
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
r = radeon_ring_lock(rdev, ring, 2);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}

/* clear the compute context state */
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));

@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
if (!rdev->pm.power_state[i].clock_info)
if (!rdev->pm.power_state[i].clock_info) {
kfree(rdev->pm.dpm.ps);
return -EINVAL;
}
ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);

@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
if (!rdev->pm.power_state[i].clock_info)
if (!rdev->pm.power_state[i].clock_info) {
kfree(rdev->pm.dpm.ps);
return -EINVAL;
}
ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);

@ -31,7 +31,7 @@ POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_em.c,v 1.370 2023/12/31 08:42:33 mglocker Exp $ */
/* $OpenBSD: if_em.c,v 1.371 2024/01/28 18:42:58 mglocker Exp $ */
/* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */

#include <dev/pci/if_em.h>

@ -291,8 +291,6 @@ void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
struct mbuf *);
u_int em_transmit_checksum_setup(struct em_queue *, struct mbuf *, u_int,
u_int32_t *, u_int32_t *);
u_int em_tso_setup(struct em_queue *, struct mbuf *, u_int, u_int32_t *,
u_int32_t *);
u_int em_tx_ctx_setup(struct em_queue *, struct mbuf *, u_int, u_int32_t *,
u_int32_t *);
void em_iff(struct em_softc *);

@ -1190,7 +1188,7 @@ em_flowstatus(struct em_softc *sc)
*
* This routine maps the mbufs to tx descriptors.
*
* return 0 on failure, positive on success
* return 0 on success, positive on failure
**********************************************************************/
u_int
em_encap(struct em_queue *que, struct mbuf *m)

@ -1238,15 +1236,7 @@ em_encap(struct em_queue *que, struct mbuf *m)
}

if (sc->hw.mac_type >= em_82575 && sc->hw.mac_type <= em_i210) {
if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) {
used += em_tso_setup(que, m, head, &txd_upper,
&txd_lower);
if (!used)
return (used);
} else {
used += em_tx_ctx_setup(que, m, head, &txd_upper,
&txd_lower);
}
used += em_tx_ctx_setup(que, m, head, &txd_upper, &txd_lower);
} else if (sc->hw.mac_type >= em_82543) {
used += em_transmit_checksum_setup(que, m, head,
&txd_upper, &txd_lower);

@ -1579,21 +1569,6 @@ em_update_link_status(struct em_softc *sc)
ifp->if_link_state = link_state;
if_link_state_change(ifp);
}

/* Disable TSO for 10/100 speeds to avoid some hardware issues */
switch (sc->link_speed) {
case SPEED_10:
case SPEED_100:
if (sc->hw.mac_type >= em_82575 && sc->hw.mac_type <= em_i210) {
ifp->if_capabilities &= ~IFCAP_TSOv4;
ifp->if_capabilities &= ~IFCAP_TSOv6;
}
break;
case SPEED_1000:
if (sc->hw.mac_type >= em_82575 && sc->hw.mac_type <= em_i210)
ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
break;
}
}

/*********************************************************************

@ -2013,7 +1988,6 @@ em_setup_interface(struct em_softc *sc)
if (sc->hw.mac_type >= em_82575 && sc->hw.mac_type <= em_i210) {
ifp->if_capabilities |= IFCAP_CSUM_IPv4;
ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
}

/*

@ -2257,9 +2231,9 @@ em_setup_transmit_structures(struct em_softc *sc)

for (i = 0; i < sc->sc_tx_slots; i++) {
pkt = &que->tx.sc_tx_pkts_ring[i];
error = bus_dmamap_create(sc->sc_dmat, EM_TSO_SIZE,
error = bus_dmamap_create(sc->sc_dmat, MAX_JUMBO_FRAME_SIZE,
EM_MAX_SCATTER / (sc->pcix_82544 ? 2 : 1),
EM_TSO_SEG_SIZE, 0, BUS_DMA_NOWAIT, &pkt->pkt_map);
MAX_JUMBO_FRAME_SIZE, 0, BUS_DMA_NOWAIT, &pkt->pkt_map);
if (error != 0) {
printf("%s: Unable to create TX DMA map\n",
DEVNAME(sc));

@ -2431,81 +2405,6 @@ em_free_transmit_structures(struct em_softc *sc)
}
}

u_int
em_tso_setup(struct em_queue *que, struct mbuf *mp, u_int head,
u_int32_t *olinfo_status, u_int32_t *cmd_type_len)
{
struct ether_extracted ext;
struct e1000_adv_tx_context_desc *TD;
uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0, mss_l4len_idx = 0;
uint32_t paylen = 0;
uint8_t iphlen = 0;

*olinfo_status = 0;
*cmd_type_len = 0;
TD = (struct e1000_adv_tx_context_desc *)&que->tx.sc_tx_desc_ring[head];

#if NVLAN > 0
if (ISSET(mp->m_flags, M_VLANTAG)) {
uint32_t vtag = mp->m_pkthdr.ether_vtag;
vlan_macip_lens |= vtag << E1000_ADVTXD_VLAN_SHIFT;
*cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
}
#endif

ether_extract_headers(mp, &ext);
if (ext.tcp == NULL)
goto out;

vlan_macip_lens |= (sizeof(*ext.eh) << E1000_ADVTXD_MACLEN_SHIFT);

if (ext.ip4) {
iphlen = ext.ip4->ip_hl << 2;

type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
#ifdef INET6
} else if (ext.ip6) {
iphlen = sizeof(*ext.ip6);

type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
#endif
} else {
goto out;
}

*cmd_type_len |= E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS;
*cmd_type_len |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DCMD_TSE;
paylen = mp->m_pkthdr.len - sizeof(*ext.eh) - iphlen -
(ext.tcp->th_off << 2);
*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;
vlan_macip_lens |= iphlen;
type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

mss_l4len_idx |= mp->m_pkthdr.ph_mss << E1000_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= (ext.tcp->th_off << 2) << E1000_ADVTXD_L4LEN_SHIFT;
/* 82575 needs the queue index added */
if (que->sc->hw.mac_type == em_82575)
mss_l4len_idx |= (que->me & 0xff) << 4;

htolem32(&TD->vlan_macip_lens, vlan_macip_lens);
htolem32(&TD->type_tucmd_mlhl, type_tucmd_mlhl);
htolem32(&TD->u.seqnum_seed, 0);
htolem32(&TD->mss_l4len_idx, mss_l4len_idx);

tcpstat_add(tcps_outpkttso, (paylen + mp->m_pkthdr.ph_mss - 1) /
mp->m_pkthdr.ph_mss);

return 1;

out:
tcpstat_inc(tcps_outbadtso);
return 0;
}

u_int
em_tx_ctx_setup(struct em_queue *que, struct mbuf *mp, u_int head,
u_int32_t *olinfo_status, u_int32_t *cmd_type_len)

@ -32,7 +32,7 @@ POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/

/* $FreeBSD: if_em.h,v 1.26 2004/09/01 23:22:41 pdeuskar Exp $ */
/* $OpenBSD: if_em.h,v 1.81 2023/12/31 08:42:33 mglocker Exp $ */
/* $OpenBSD: if_em.h,v 1.82 2024/01/28 18:42:58 mglocker Exp $ */

#ifndef _EM_H_DEFINED_
#define _EM_H_DEFINED_

@ -55,14 +55,11 @@ POSSIBILITY OF SUCH DAMAGE.

#include <net/if.h>
#include <net/if_media.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>

#if NBPFILTER > 0

@ -272,7 +269,6 @@ typedef int boolean_t;

#define EM_MAX_SCATTER 64
#define EM_TSO_SIZE 65535
#define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */

struct em_packet {
int pkt_eop; /* Index of the desc to watch */

@ -31,7 +31,7 @@

*******************************************************************************/

/* $OpenBSD: if_em_hw.h,v 1.91 2023/12/31 08:42:33 mglocker Exp $ */
/* $OpenBSD: if_em_hw.h,v 1.92 2024/01/28 18:42:58 mglocker Exp $ */
/* $FreeBSD: if_em_hw.h,v 1.15 2005/05/26 23:32:02 tackerman Exp $ */

/* if_em_hw.h

@ -2150,7 +2150,6 @@ struct e1000_adv_tx_context_desc {
#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */

/* Adv Transmit Descriptor Config Masks */

@ -2161,10 +2160,6 @@ struct e1000_adv_tx_context_desc {
#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */

/* Req requires Markers and CRC */
#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */

/* Multiple Receive Queue Control */
#define E1000_MRQC_ENABLE_MASK 0x00000003
#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001

@ -1,4 +1,4 @@
/* $OpenBSD: pucdata.c,v 1.118 2022/10/24 05:57:58 jsg Exp $ */
/* $OpenBSD: pucdata.c,v 1.119 2024/01/28 03:01:39 jsg Exp $ */
/* $NetBSD: pucdata.c,v 1.6 1999/07/03 05:55:23 cgd Exp $ */

/*

@ -306,6 +306,13 @@ const struct puc_device_description puc_devs[] = {
{ PUC_PORT_COM, 0x10, 0x0000 },
},
},
{ /* Intel C3000 UART */
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3000_HSUART, 0x0000, 0x0000 },
{ 0xffff, 0xffff, 0x0000, 0x0000 },
{
{ PUC_PORT_COM, 0x10, 0x0000 },
},
},
/*
* XXX no entry because I have no data:
* XXX Dolphin Peripherals 4006 (single parallel)

@ -1669,10 +1669,10 @@ local block_state deflate_stored(deflate_state *s, int flush) {
_tr_stored_block(s, (char *)0, 0L, last);

/* Replace the lengths in the dummy stored block with len. */
s->pending_buf[s->pending - 4] = len;
s->pending_buf[s->pending - 3] = len >> 8;
s->pending_buf[s->pending - 2] = ~len;
s->pending_buf[s->pending - 1] = ~len >> 8;
s->pending_buf[s->pending - 4] = (Bytef)len;
s->pending_buf[s->pending - 3] = (Bytef)(len >> 8);
s->pending_buf[s->pending - 2] = (Bytef)~len;
s->pending_buf[s->pending - 1] = (Bytef)(~len >> 8);

/* Write the stored block header bytes. */
flush_pending(s->strm);

@ -722,7 +722,7 @@ local void scan_tree(deflate_state *s, ct_data *tree, int max_code) {
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
s->bl_tree[curlen].Freq += count;
s->bl_tree[curlen].Freq += (ush)count;
} else if (curlen != 0) {
if (curlen != prevlen) s->bl_tree[curlen].Freq++;
s->bl_tree[REP_3_6].Freq++;

@ -157,7 +157,7 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
# define OS_CODE 7
#endif

#ifdef __acorn
#if defined(__acorn) || defined(__riscos)
# define OS_CODE 13
#endif

@ -1,4 +1,4 @@
/* $OpenBSD: in_pcb.c,v 1.286 2024/01/19 02:24:07 bluhm Exp $ */
/* $OpenBSD: in_pcb.c,v 1.287 2024/01/28 20:34:25 bluhm Exp $ */
/* $NetBSD: in_pcb.c,v 1.25 1996/02/13 23:41:53 christos Exp $ */

/*

@ -720,18 +720,14 @@ in_peeraddr(struct socket *so, struct mbuf *nam)
* any errors for each matching socket.
*/
void
in_pcbnotifyall(struct inpcbtable *table, struct sockaddr *dst, u_int rtable,
int errno, void (*notify)(struct inpcb *, int))
in_pcbnotifyall(struct inpcbtable *table, const struct sockaddr_in *dst,
u_int rtable, int errno, void (*notify)(struct inpcb *, int))
{
SIMPLEQ_HEAD(, inpcb) inpcblist;
struct inpcb *inp;
struct in_addr faddr;
u_int rdomain;

if (dst->sa_family != AF_INET)
return;
faddr = satosin(dst)->sin_addr;
if (faddr.s_addr == INADDR_ANY)
if (dst->sin_addr.s_addr == INADDR_ANY)
return;
if (notify == NULL)
return;

@ -754,7 +750,7 @@ in_pcbnotifyall(struct inpcbtable *table, struct sockaddr *dst, u_int rtable,
if (ISSET(inp->inp_flags, INP_IPV6))
continue;
#endif
if (inp->inp_faddr.s_addr != faddr.s_addr ||
if (inp->inp_faddr.s_addr != dst->sin_addr.s_addr ||
rtable_l2(inp->inp_rtableid) != rdomain) {
continue;
}

@ -1,4 +1,4 @@
/* $OpenBSD: in_pcb.h,v 1.148 2024/01/09 19:57:00 bluhm Exp $ */
/* $OpenBSD: in_pcb.h,v 1.149 2024/01/28 20:34:25 bluhm Exp $ */
/* $NetBSD: in_pcb.h,v 1.14 1996/02/13 23:42:00 christos Exp $ */

/*

@ -352,7 +352,7 @@ void in_pcbinit(struct inpcbtable *, int);
struct inpcb *
in_pcblookup_local_lock(struct inpcbtable *, const void *, u_int, int,
u_int, int);
void in_pcbnotifyall(struct inpcbtable *, struct sockaddr *,
void in_pcbnotifyall(struct inpcbtable *, const struct sockaddr_in *,
u_int, int, void (*)(struct inpcb *, int));
void in_pcbrehash(struct inpcb *);
void in_rtchange(struct inpcb *, int);

@ -367,7 +367,7 @@ struct rtentry *
in_pcbrtentry(struct inpcb *);

/* INET6 stuff */
void in6_pcbnotify(struct inpcbtable *, struct sockaddr_in6 *,
void in6_pcbnotify(struct inpcbtable *, const struct sockaddr_in6 *,
u_int, const struct sockaddr_in6 *, u_int, u_int, int, void *,
void (*)(struct inpcb *, int));
int in6_selecthlim(const struct inpcb *);

@ -1,4 +1,4 @@
/* $OpenBSD: tcp_subr.c,v 1.196 2024/01/27 21:13:46 bluhm Exp $ */
/* $OpenBSD: tcp_subr.c,v 1.197 2024/01/28 20:34:25 bluhm Exp $ */
/* $NetBSD: tcp_subr.c,v 1.22 1996/02/13 23:44:00 christos Exp $ */

/*

@ -833,7 +833,7 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, u_int rdomain, void *v)
}
in_pcbunref(inp);
} else
in_pcbnotifyall(&tcbtable, sa, rdomain, errno, notify);
in_pcbnotifyall(&tcbtable, satosin(sa), rdomain, errno, notify);
}

@ -1,4 +1,4 @@
/* $OpenBSD: tcp_timer.c,v 1.75 2024/01/27 21:35:13 bluhm Exp $ */
/* $OpenBSD: tcp_timer.c,v 1.76 2024/01/28 20:34:25 bluhm Exp $ */
/* $NetBSD: tcp_timer.c,v 1.14 1996/02/13 23:44:09 christos Exp $ */

/*

@ -236,8 +236,8 @@ tcp_timer_rexmt(void *arg)
sin.sin_len = sizeof(sin);
sin.sin_family = AF_INET;
sin.sin_addr = inp->inp_faddr;
in_pcbnotifyall(&tcbtable, sintosa(&sin), inp->inp_rtableid,
EMSGSIZE, tcp_mtudisc);
in_pcbnotifyall(&tcbtable, &sin, inp->inp_rtableid, EMSGSIZE,
tcp_mtudisc);
goto out;
}

@ -1,4 +1,4 @@
/* $OpenBSD: udp_usrreq.c,v 1.315 2024/01/21 01:17:20 bluhm Exp $ */
/* $OpenBSD: udp_usrreq.c,v 1.316 2024/01/28 20:34:25 bluhm Exp $ */
/* $NetBSD: udp_usrreq.c,v 1.28 1996/03/16 23:54:03 christos Exp $ */

/*

@ -919,7 +919,7 @@ udp_ctlinput(int cmd, struct sockaddr *sa, u_int rdomain, void *v)
notify(inp, errno);
in_pcbunref(inp);
} else
in_pcbnotifyall(&udbtable, sa, rdomain, errno, notify);
in_pcbnotifyall(&udbtable, satosin(sa), rdomain, errno, notify);
}

int

@ -1,4 +1,4 @@
/* $OpenBSD: in6_pcb.c,v 1.132 2024/01/09 19:57:01 bluhm Exp $ */
/* $OpenBSD: in6_pcb.c,v 1.133 2024/01/28 20:34:25 bluhm Exp $ */

/*
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.

@ -423,7 +423,7 @@ in6_peeraddr(struct socket *so, struct mbuf *nam)
* once PCB to be notified has been located.
*/
void
in6_pcbnotify(struct inpcbtable *table, struct sockaddr_in6 *dst,
in6_pcbnotify(struct inpcbtable *table, const struct sockaddr_in6 *dst,
uint fport_arg, const struct sockaddr_in6 *src, uint lport_arg,
u_int rtable, int cmd, void *cmdarg, void (*notify)(struct inpcb *, int))
{

@ -1,9 +1,9 @@
# $OpenBSD: Makefile,v 1.1 2015/07/21 04:06:04 yasuoka Exp $
# $OpenBSD: Makefile,v 1.2 2024/01/28 18:38:16 deraadt Exp $
PROG= radiusd_bsdauth
BINDIR= /usr/libexec/radiusd
SRCS= radiusd_bsdauth.c radiusd_module.c imsg_subr.c
LDADD+= -lradius -lcrypto -lutil
LDADD+= ${LIBRADIUS} ${LIBCRYPTO} ${LIBUTIL}
DPADD+= ${LIBRADIUS} ${LIBCRYPTO} ${LIBUTIL}
NOMAN= #

.include <bsd.prog.mk>

@ -1,10 +1,10 @@
# $OpenBSD: Makefile,v 1.1 2015/07/21 04:06:04 yasuoka Exp $
# $OpenBSD: Makefile,v 1.2 2024/01/28 18:38:16 deraadt Exp $
PROG= radiusd_radius
BINDIR= /usr/libexec/radiusd
SRCS= radiusd_radius.c radiusd_module.c util.c imsg_subr.c log.c
CFLAGS+= -DUSE_LIBEVENT
LDADD+= -lradius -lcrypto -lutil -levent
LDADD+= ${LIBRADIUS} ${LIBCRYPTO} ${LIBUTIL} ${LIBEVENT}
DPADD+= ${LIBRADIUS} ${LIBCRYPTO} ${LIBUTIL} ${LIBEVENT}
NOMAN= #

.include <bsd.prog.mk>

@ -1,4 +1,4 @@
/* $OpenBSD: smtp_session.c,v 1.440 2024/01/20 09:01:03 claudio Exp $ */
/* $OpenBSD: smtp_session.c,v 1.441 2024/01/28 17:23:17 op Exp $ */

/*
* Copyright (c) 2008 Gilles Chehade <gilles@poolp.org>

@ -489,7 +489,7 @@ header_domain_append_callback(struct smtp_tx *tx, const char *hdr,
quote = !quote;
if (line[i] == ')' && !escape && !quote && comment)
comment--;
if (line[i] == '\\' && !escape && !comment && !quote)
if (line[i] == '\\' && !escape && !comment)
escape = 1;
else
escape = 0;