/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov
 * Copyright (C) 2015 Wang Nan
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <linux/bpf.h>
#include "bpf.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static __u64 ptr_to_u64(void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
		   unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
		     size_t insns_cnt, char *license,
		     __u32 kern_version, char *log_buf, size_t log_buf_sz)
{
	int fd;
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(NULL);
	attr.log_size = 0;
	attr.log_level = 0;
	attr.kern_version = kern_version;

	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd >= 0 || !log_buf || !log_buf_sz)
		return fd;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_map_update_elem(int fd, void *key, void *value,
			__u64 flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem(int fd, void *key, void *value)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_delete_elem(int fd, void *key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_get_next_key(int fd, void *key, void *next_key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
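/*
 * Usage sketch (not part of this file): a minimal example of driving the
 * wrappers above. It assumes a kernel with eBPF support, sufficient
 * privileges (typically root/CAP_SYS_ADMIN), and compilation as a
 * separate program linked against this file and its "bpf.h" header.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <linux/bpf.h>

#include "bpf.h"

int main(void)
{
	__u32 key = 0, value = 42, out = 0;
	int map_fd;

	/* One-slot array map: 4-byte keys, 4-byte values, no flags. */
	map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key),
				sizeof(value), 1, 0);
	if (map_fd < 0) {
		fprintf(stderr, "bpf_create_map: %s\n", strerror(errno));
		return 1;
	}

	/* The wrappers return the raw syscall result and set errno,
	 * so failures can be reported the usual way. */
	if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY) ||
	    bpf_map_lookup_elem(map_fd, &key, &out)) {
		fprintf(stderr, "map operation: %s\n", strerror(errno));
		return 1;
	}

	printf("key %u -> value %u\n", key, out);
	return 0;
}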
Merge branch 'sparc64-non-resumable-user-error-recovery'
Liam R. Howlett says:

====================
sparc64: Recover from userspace non-resumable PIO & MEM errors

A non-resumable error from userspace is able to cause a kernel panic or
trap loop due to the setup and handling of the queued traps once in the
kernel. This patch series addresses both of these issues.

The queues are fixed by simply zeroing the memory before use.

PIO errors from userspace will result in a SIGBUS being sent to the
user process.

The MEM errors from userspace will result in a SIGKILL and also cause
the offending pages to be claimed so they are no longer used in future
tasks. SIGKILL is used to ensure that the process does not try to
coredump and result in an attempt to read the memory again from within
kernel space.

Although there is a HV call to scrub the memory (mem_scrub), there is
no easy way to guarantee that the real memory address(es) are not used
by other tasks. Clearing the error with mem_scrub would zero the memory
and cause the other processes to proceed with bad data.

The handling of other non-resumable errors remains unchanged and will
cause a panic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
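The central idiom in the queue part of the fix is "zero before use". The
following userspace-compilable sketch only illustrates that principle; the
struct layout and names are hypothetical and do not correspond to the
kernel's actual sun4v error-report format.

#include <string.h>

/* Hypothetical error-queue entry; the field names and layout are
 * illustrative only, not the kernel's real sun4v report format. */
struct err_entry {
	unsigned long handle;
	unsigned long raddr;
	unsigned int type;
	unsigned int attrs;
};

/* Zeroing an entry before it is (re)used means any field the next
 * report leaves untouched reads as 0 rather than as stale data from a
 * previous error; stale fields are what could steer the handler into
 * a panic or trap loop. */
static void prep_entry(struct err_entry *ent)
{
	memset(ent, 0, sizeof(*ent));
}

int main(void)
{
	struct err_entry e = { .handle = 0xdead, .type = 7 };

	prep_entry(&e);		/* stale contents cleared */
	return (int)e.type;	/* 0: a safe default, not the leftover 7 */
}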