FIPS 9.2 Sync Certified with Compliant (from kernel-src-tree) #16

Open · wants to merge 5 commits into base: FIPS-9-COMPLIANT
Changes from all commits
7 changes: 2 additions & 5 deletions drivers/nvme/target/tcp.c
@@ -346,6 +346,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
queue->rcv_state = NVMET_TCP_RECV_ERR;
if (status == -EPIPE || status == -ECONNRESET)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
else
@@ -872,15 +873,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
iov.iov_len = sizeof(*icresp);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (ret < 0)
goto free_crypto;
return ret; /* queue removal will cleanup */

queue->state = NVMET_TCP_Q_LIVE;
nvmet_prepare_receive_pdu(queue);
return 0;
free_crypto:
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
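For context on why the explicit crypto free above is dropped: the digest hash transforms are torn down when the queue itself is released, so the icreq error path only needs to propagate the failure and let queue removal do the cleanup. An abridged sketch of that release path (quoted from the driver from memory and heavily elided, so treat the exact shape as an assumption):

/* Abridged sketch of the queue release path in drivers/nvme/target/tcp.c
 * (from memory, not part of this diff): freeing the hash transforms here is
 * what makes the early free in nvmet_tcp_handle_icreq() redundant and, worse,
 * a potential double free.
 */
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	/* ... socket shutdown, command and queue teardown elided ... */

	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);

	/* ... ida release and final free of the queue elided ... */
}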
110 changes: 107 additions & 3 deletions kernel/bpf/verifier.c
@@ -695,6 +695,12 @@ static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
return type == BPF_DYNPTR_TYPE_RINGBUF;
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi);

static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
enum bpf_arg_type arg_type, int insn_idx)
{
@@ -762,6 +768,55 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
return 0;
}

static void __mark_reg_unknown(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi)
{
int i;

/* We always ensure that STACK_DYNPTR is never set partially,
* hence just checking for slot_type[0] is enough. This is
* different for STACK_SPILL, where it may be only set for
* 1 byte, so code has to use is_spilled_reg.
*/
if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
return 0;

/* Reposition spi to first slot */
if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
spi = spi + 1;

if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
verbose(env, "cannot overwrite referenced dynptr\n");
return -EINVAL;
}

mark_stack_slot_scratched(env, spi);
mark_stack_slot_scratched(env, spi - 1);

/* Writing partially to one dynptr stack slot destroys both. */
for (i = 0; i < BPF_REG_SIZE; i++) {
state->stack[spi].slot_type[i] = STACK_INVALID;
state->stack[spi - 1].slot_type[i] = STACK_INVALID;
}

/* TODO: Invalidate any slices associated with this dynptr */

/* Do not release reference state, we are destroying dynptr on stack,
* not using some helper to release it. Just reset register.
*/
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

/* Same reason as unmark_stack_slots_dynptr above */
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

return 0;
}

static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
struct bpf_func_state *state = func(env, reg);
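To make the effect of the destroy_if_dynptr_stack_slot() helper added above concrete, here is a selftest-style sketch (illustrative only, not part of this PR; section, map, and program names are made up) of a program that performs a direct stack store into the slots backing a dynptr. Once the helper is wired into the stack-write paths further down, a write over a referenced (ringbuf-backed) dynptr is rejected, and a write over an unreferenced one invalidates both slots so any later use of the dynptr fails verification:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative example only -- not a selftest from this series. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

SEC("tc")
int overwrite_referenced_dynptr(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	/* Direct stack store clobbering one of the two slots backing 'ptr';
	 * with this patch the verifier rejects the program with
	 * "cannot overwrite referenced dynptr".
	 */
	*(unsigned long *)&ptr = 0;

	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";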
@@ -1300,9 +1355,6 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
@@ -2645,6 +2697,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
}
} else if (opcode == BPF_EXIT) {
return -ENOTSUPP;
} else if (BPF_SRC(insn->code) == BPF_X) {
if (!(*reg_mask & (dreg | sreg)))
return 0;
/* dreg <cond> sreg
* Both dreg and sreg need precision before
* this insn. If only sreg was marked precise
* before it would be equally necessary to
* propagate it to dreg.
*/
*reg_mask |= (sreg | dreg);
/* else dreg <cond> K
* Only dreg still needs precision before
* this insn, so for the K-based conditional
* there is nothing new to be marked.
*/
}
} else if (class == BPF_LD) {
if (!(*reg_mask & dreg))
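A rough illustration of the register-register case handled in the new BPF_X branch above (hypothetical program shape, not taken from this series): when the destination scalar of a `dreg <cond> sreg` jump later needs to be precise, backtracking through the comparison now also marks the source scalar, instead of dropping the requirement.

/* Hypothetical shape only: 'len' and 'limit' are both unknown scalars, so
 * clang emits a BPF_JMP | BPF_X comparison between two registers.  If the
 * verifier later needs 'len' to be precise (it indexes the stack buffer),
 * backtracking through the comparison must mark 'limit' for precision too.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int jmp_x_precision(struct __sk_buff *skb)
{
	char buf[64] = {};
	__u32 len = skb->len & 0x3f;    /* scalar operand #1 (dreg) */
	__u32 limit = skb->mark & 0x3f; /* scalar operand #2 (sreg) */

	if (len > limit)                /* dreg <cond> sreg */
		return 0;

	return buf[len];                /* 'len' used as a variable stack offset */
}

char _license[] SEC("license") = "GPL";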
@@ -3037,6 +3104,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
}

err = destroy_if_dynptr_stack_slot(env, state, spi);
if (err)
return err;

mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
@@ -3150,6 +3221,14 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
if (err)
return err;

for (i = min_off; i < max_off; i++) {
int spi;

spi = get_spi(i);
err = destroy_if_dynptr_stack_slot(env, state, spi);
if (err)
return err;
}

/* Variable offset writes destroy any spilled pointers in range. */
for (i = min_off; i < max_off; i++) {
@@ -5111,6 +5190,31 @@ static int check_stack_range_initialized(
}

if (meta && meta->raw_mode) {
/* Ensure we won't be overwriting dynptrs when simulating byte
* by byte access in check_helper_call using meta.access_size.
* This would be a problem if we have a helper in the future
* which takes:
*
* helper(uninit_mem, len, dynptr)
*
* Now, uninit_mem may overlap with dynptr pointer. Hence, it
* may end up writing to dynptr itself when touching memory from
* arg 1. This can be relaxed on a case by case basis for known
* safe cases, but reject due to the possibility of aliasing by
* default.
*/
for (i = min_off; i < max_off + access_size; i++) {
int stack_off = -i - 1;

spi = get_spi(i);
/* raw_mode may write past allocated_stack */
if (state->allocated_stack <= stack_off)
continue;
if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
return -EACCES;
}
}
meta->access_size = access_size;
meta->regno = regno;
return 0;
4 changes: 4 additions & 0 deletions net/netfilter/nft_set_pipapo.c
@@ -1981,6 +1981,10 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
goto cont;

e = f->mt[r].e;

if (!nft_set_elem_active(&e->ext, iter->genmask))
goto cont;

if (nft_set_elem_expired(&e->ext))
goto cont;

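For reference, the helper the pipapo walk now consults is a one-line genmask test from include/net/netfilter/nf_tables.h, sketched here from memory (the exact prototype may differ slightly): the iteration must skip elements that are not active in the current generation, just as the existing nft_set_elem_expired() check skips expired ones.

/* Sketch of the helper used above (reconstructed from memory): an element is
 * active for the caller if none of the caller's genmask bits are set in the
 * element's genmask.
 */
static inline bool nft_set_elem_active(const struct nft_set_ext *ext,
				       u8 genmask)
{
	return !(ext->genmask & genmask);
}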
22 changes: 22 additions & 0 deletions tools/testing/selftests/bpf/progs/pyperf180.c
@@ -1,4 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 180

/* The llvm upstream commit below (landed in clang18)
 * https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e
 * changed inlining behavior and caused a compilation failure: some branch
 * target distances exceeded the 16-bit representation, which is the maximum
 * for cpu v1/v2/v3. The macro __BPF_CPU_VERSION__ was later implemented in
 * clang18 to specify which cpu version is used for compilation, so a smaller
 * unroll count can be set when __BPF_CPU_VERSION__ is less than 4, which
 * reduces some branch target distances and resolves the compilation failure.
 *
 * To capture the case where a developer/CI uses clang18 but the corresponding
 * llvm/clang checkout does not have __BPF_CPU_VERSION__, a smaller unroll
 * count is set as well to prevent potential compilation failures.
*/
#ifdef __BPF_CPU_VERSION__
#if __BPF_CPU_VERSION__ < 4
#define UNROLL_COUNT 90
#endif
#elif __clang_major__ == 18
#define UNROLL_COUNT 90
#endif

#include "pyperf.h"
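The UNROLL_COUNT define above only takes effect if pyperf.h consumes it when unrolling its frame-walking loop; upstream this is done with a clang loop pragma along the lines of the fragment below (reconstructed from memory, so the exact #ifdef structure in pyperf.h may differ):

/* Sketch of how pyperf.h is expected to honor UNROLL_COUNT (from memory,
 * not part of this diff): cap the unroll factor instead of fully unrolling.
 */
#ifdef NO_UNROLL
#pragma clang loop unroll(disable)
#elif defined(UNROLL_COUNT)
#pragma clang loop unroll_count(UNROLL_COUNT)
#else
#pragma clang loop unroll(full)
#endif /* NO_UNROLL */
	for (int i = 0; i < STACK_MAX_LEN; ++i) {
		/* ... walk one Python stack frame per iteration ... */
	}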