author    Linus Torvalds <torvalds@linux-foundation.org>  2022-10-04 13:38:03 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-10-04 13:38:03 -0700
commit    0326074ff4652329f2a1a9c8685104576bd8d131 (patch)
tree      9a7574c7ccb05bf4c7cb34fc5a65457bb8f495cb  /tools/lib/bpf/libbpf.c
parent    522667b24f08009591c90e75bfe2ffb67f555498 (diff)
parent    681bf011b9b5989c6e9db6beb64494918aab9a43 (diff)
Merge tag 'net-next-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:

   - Introduce and use a single page frag cache for allocating small skb heads, clawing back the 10-20% performance regression in UDP flood test from previous fixes.

   - Run packets which already went thru HW coalescing thru SW GRO. This significantly improves TCP segment coalescing and simplifies deployments as different workloads benefit from HW or SW GRO.

   - Shrink the size of the base zero-copy send structure.

   - Move TCP init under a new slow / sleepable version of DO_ONCE().

  BPF:

   - Add BPF-specific, any-context-safe memory allocator.

   - Add helpers/kfuncs for PKCS#7 signature verification from BPF programs.

   - Define a new map type and related helpers for user space -> kernel communication over a ring buffer (BPF_MAP_TYPE_USER_RINGBUF).

   - Allow targeting BPF iterators to loop through resources of one task/thread.

   - Add ability to call selected destructive functions. Expose crash_kexec() to allow BPF to trigger a kernel dump. Use CAP_SYS_BOOT check on the loading process to judge permissions.

   - Enable BPF to collect custom hierarchical cgroup stats efficiently by integrating with the rstat framework.

   - Support struct arguments for trampoline based programs. Only structs with size <= 16B and x86 are supported.

   - Invoke cgroup/connect{4,6} programs for unprivileged ICMP ping sockets (instead of just TCP and UDP sockets).

   - Add a helper for accessing CLOCK_TAI for time sensitive network related programs.

   - Support accessing network tunnel metadata's flags.

   - Make TCP SYN ACK RTO tunable by BPF programs with TCP Fast Open.

   - Add support for writing to Netfilter's nf_conn:mark.

  Protocols:

   - WiFi: more Extremely High Throughput (EHT) and Multi-Link Operation (MLO) work (802.11be, WiFi 7).

   - vsock: improve support for SO_RCVLOWAT.

   - SMC: support SO_REUSEPORT.

   - Netlink: define and document how to use netlink in a "modern" way. Support reporting missing attributes via extended ACK.

   - IPSec: support collect metadata mode for xfrm interfaces.

   - TCPv6: send consistent autoflowlabel in SYN_RECV state and RST packets.

   - TCP: introduce optional per-netns connection hash table to allow better isolation between namespaces (opt-in, at the cost of memory and cache pressure).

   - MPTCP: support TCP_FASTOPEN_CONNECT.

   - Add NEXT-C-SID support in Segment Routing (SRv6) End behavior.

   - Adjust IP_UNICAST_IF sockopt behavior for connected UDP sockets.

   - Open vSwitch:
      - Allow specifying ifindex of new interfaces.
      - Allow conntrack and metering in non-initial user namespace.

   - TLS: support the Korean ARIA-GCM crypto algorithm.

   - Remove DECnet support.

  Driver API:

   - Allow selecting the conduit interface used by each port in DSA switches, at runtime.

   - Ethernet Power Sourcing Equipment and Power Device support.

   - Add tc-taprio support for queueMaxSDU parameter, i.e. setting per traffic class max frame size for time-based packet schedules.

   - Support PHY rate matching - adapting between differing host-side and link-side speeds.

   - Introduce QUSGMII PHY mode and 1000BASE-KX interface mode.

   - Validate OF (device tree) nodes for DSA shared ports; make phylink-related properties mandatory on DSA and CPU ports. Enforcing more uniformity should allow transitioning to phylink.

   - Require that flash component name used during update matches one of the components for which version is reported by info_get().

   - Remove "weight" argument from driver-facing NAPI API as much as possible. It's one of those magic knobs which seemed like a good idea at the time but is too indirect to use in practice.

   - Support offload of TLS connections with 256 bit keys.

  New hardware / drivers:

   - Ethernet:
      - Microchip KSZ9896 6-port Gigabit Ethernet Switch
      - Renesas Ethernet AVB (EtherAVB-IF) Gen4 SoCs
      - Analog Devices ADIN1110 and ADIN2111 industrial single pair Ethernet (10BASE-T1L) MAC+PHY.
      - Rockchip RV1126 Gigabit Ethernet (a version of stmmac IP).

   - Ethernet SFPs / modules:
      - RollBall / Hilink / Turris 10G copper SFPs
      - HALNy GPON module

   - WiFi:
      - CYW43439 SDIO chipset (brcmfmac)
      - CYW89459 PCIe chipset (brcmfmac)
      - BCM4378 on Apple platforms (brcmfmac)

  Drivers:

   - CAN:
      - gs_usb: HW timestamp support

   - Ethernet PHYs:
      - lan8814: cable diagnostics

   - Ethernet NICs:
      - Intel (100G):
         - implement control of FCS/CRC stripping
         - port splitting via devlink
         - L2TPv3 filtering offload
      - nVidia/Mellanox:
         - tunnel offload for sub-functions
         - MACSec offload, w/ Extended packet number and replay window offload
         - significantly restructure, and optimize the AF_XDP support, align the behavior with other vendors
      - Huawei:
         - configuring DSCP map for traffic class selection
         - querying standard FEC statistics
         - querying SerDes lane number via ethtool
      - Marvell/Cavium:
         - egress priority flow control
         - MACSec offload
      - AMD/SolarFlare:
         - PTP over IPv6 and raw Ethernet
      - small / embedded:
         - ax88772: convert to phylink (to support SFP cages)
         - altera: tse: convert to phylink
         - ftgmac100: support fixed link
         - enetc: standard Ethtool counters
         - macb: ZynqMP SGMII dynamic configuration support
         - tsnep: support multi-queue and use page pool
         - lan743x: Rx IP & TCP checksum offload
         - igc: add xdp frags support to ndo_xdp_xmit

   - Ethernet high-speed switches:
      - Marvell (prestera):
         - support SPAN port features (traffic mirroring)
         - nexthop object offloading
      - Microchip (sparx5):
         - multicast forwarding offload
         - QoS queuing offload (tc-mqprio, tc-tbf, tc-ets)

   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - support RGMII cmode
      - NXP (felix):
         - standardized ethtool counters
      - Microchip (lan966x):
         - QoS queuing offload (tc-mqprio, tc-tbf, tc-cbs, tc-ets)
         - traffic policing and mirroring
         - link aggregation / bonding offload
         - QUSGMII PHY mode support

   - Qualcomm 802.11ax WiFi (ath11k):
      - cold boot calibration support on WCN6750
      - support to connect to a non-transmit MBSSID AP profile
      - enable remain-on-channel support on WCN6750
      - Wake-on-WLAN support for WCN6750
      - support to provide transmit power from firmware via nl80211
      - support to get power save duration for each client
      - spectral scan support for 160 MHz

   - MediaTek WiFi (mt76):
      - WiFi-to-Ethernet bridging offload for MT7986 chips

   - RealTek WiFi (rtw89):
      - P2P support"

* tag 'net-next-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1864 commits)
  eth: pse: add missing static inlines
  once: rename _SLOW to _SLEEPABLE
  net: pse-pd: add regulator based PSE driver
  dt-bindings: net: pse-dt: add bindings for regulator based PoDL PSE controller
  ethtool: add interface to interact with Ethernet Power Equipment
  net: mdiobus: search for PSE nodes by parsing PHY nodes.
  net: mdiobus: fwnode_mdiobus_register_phy() rework error handling
  net: add framework to support Ethernet PSE and PDs devices
  dt-bindings: net: phy: add PoDL PSE property
  net: marvell: prestera: Propagate nh state from hw to kernel
  net: marvell: prestera: Add neighbour cache accounting
  net: marvell: prestera: add stub handler neighbour events
  net: marvell: prestera: Add heplers to interact with fib_notifier_info
  net: marvell: prestera: Add length macros for prestera_ip_addr
  net: marvell: prestera: add delayed wq and flush wq on deinit
  net: marvell: prestera: Add strict cleanup of fib arbiter
  net: marvell: prestera: Add cleanup of allocated fib_nodes
  net: marvell: prestera: Add router nexthops ABI
  eth: octeon: fix build after netif_napi_add() changes
  net/mlx5: E-Switch, Return EBUSY if can't get mode lock
  ...
Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r--  tools/lib/bpf/libbpf.c  208
1 file changed, 122 insertions(+), 86 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 50d41815f431..184ce1684dcd 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -163,6 +163,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
+ [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
};
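The new name-table entry teaches libbpf to recognize and print the BPF_MAP_TYPE_USER_RINGBUF map type added in this cycle. A minimal sketch of how such a map might be declared and filled from user space, assuming the user_ring_buffer API that ships alongside this change (the map name 'urb' and the 8-byte sample are illustrative):

    /* BPF object file side (sketch; needs bpf_helpers.h) */
    struct {
            __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
            __uint(max_entries, 256 * 1024);
    } urb SEC(".maps");

    /* user-space side (sketch; error handling trimmed) */
    #include <bpf/libbpf.h>

    static int publish_one(int map_fd)
    {
            struct user_ring_buffer *rb = user_ring_buffer__new(map_fd, NULL);
            __u64 *slot;

            if (!rb)
                    return -1;
            slot = user_ring_buffer__reserve(rb, sizeof(*slot));
            if (slot) {
                    *slot = 42;
                    /* notifies BPF programs draining via bpf_user_ringbuf_drain() */
                    user_ring_buffer__submit(rb, slot);
            }
            user_ring_buffer__free(rb);
            return 0;
    }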
static const char * const prog_type_name[] = {
@@ -223,13 +224,18 @@ __printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
va_list args;
+ int old_errno;
if (!__libbpf_pr)
return;
+ old_errno = errno;
+
va_start(args, format);
__libbpf_pr(level, format, args);
va_end(args);
+
+ errno = old_errno;
}
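Saving and restoring errno around the print callback matters because libbpf callers routinely capture `err = -errno` right after a failing syscall, and a user-supplied printer installed via libbpf_set_print() may clobber errno in between. A sketch of such a callback:

    #include <stdarg.h>
    #include <stdio.h>
    #include <bpf/libbpf.h>

    /* forwards libbpf messages to stderr; may itself change errno */
    static int my_print(enum libbpf_print_level level, const char *fmt, va_list args)
    {
            if (level == LIBBPF_DEBUG)
                    return 0;
            return vfprintf(stderr, fmt, args);
    }

    int main(void)
    {
            libbpf_set_print(my_print);
            /* libbpf calls can now log through my_print without
             * disturbing errno-based error reporting */
            return 0;
    }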
static void pr_perm_msg(int err)
@@ -412,6 +418,7 @@ struct bpf_program {
int fd;
bool autoload;
+ bool autoattach;
bool mark_btf_static;
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
@@ -591,7 +598,6 @@ struct elf_state {
size_t strtabidx;
struct elf_sec_desc *secs;
int sec_cnt;
- int maps_shndx;
int btf_maps_shndx;
__u32 btf_maps_sec_btf_id;
int text_shndx;
@@ -751,6 +757,8 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
prog->autoload = true;
}
+ prog->autoattach = true;
+
/* inherit object's log_level */
prog->log_level = obj->log_level;
@@ -876,7 +884,7 @@ __u32 get_kernel_version(void)
__u32 major, minor, patch;
struct utsname info;
- if (access(ubuntu_kver_file, R_OK) == 0) {
+ if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) == 0) {
FILE *f;
f = fopen(ubuntu_kver_file, "r");
@@ -1272,7 +1280,6 @@ static struct bpf_object *bpf_object__new(const char *path,
*/
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
- obj->efile.maps_shndx = -1;
obj->efile.btf_maps_shndx = -1;
obj->efile.st_ops_shndx = -1;
obj->kconfig_map_idx = -1;
@@ -1642,6 +1649,10 @@ static int bpf_object__init_global_data_maps(struct bpf_object *obj)
for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
sec_desc = &obj->efile.secs[sec_idx];
+ /* Skip recognized sections with size 0. */
+ if (!sec_desc->data || sec_desc->data->d_size == 0)
+ continue;
+
switch (sec_desc->sec_type) {
case SEC_DATA:
sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
@@ -2086,19 +2097,30 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
return true;
}
+static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
+{
+ int len;
+
+ len = snprintf(buf, buf_sz, "%s/%s", path, name);
+ if (len < 0)
+ return -EINVAL;
+ if (len >= buf_sz)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
static int build_map_pin_path(struct bpf_map *map, const char *path)
{
char buf[PATH_MAX];
- int len;
+ int err;
if (!path)
path = "/sys/fs/bpf";
- len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
- if (len < 0)
- return -EINVAL;
- else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
+ err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
+ if (err)
+ return err;
return bpf_map__set_pin_path(map, buf);
}
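pathname_concat() centralizes the truncation checks that were previously open-coded at every pin/unpin site: snprintf() returns the length it *would* have written, so `len >= buf_sz` reliably signals a truncated path. All of the pin-path construction below funnels through it, including the public entry point; a short usage sketch (the bpffs subdirectory is illustrative):

    #include <bpf/libbpf.h>

    /* pin every auto-created map under a custom bpffs directory;
     * each map lands at "<path>/<map name>", built by pathname_concat() */
    static int pin_all(struct bpf_object *obj)
    {
            return bpf_object__pin_maps(obj, "/sys/fs/bpf/myapp");
    }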
@@ -2362,6 +2384,12 @@ static size_t adjust_ringbuf_sz(size_t sz)
return sz;
}
+static bool map_is_ringbuf(const struct bpf_map *map)
+{
+ return map->def.type == BPF_MAP_TYPE_RINGBUF ||
+ map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
+}
+
static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
{
map->def.type = def->map_type;
@@ -2376,7 +2404,7 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
map->btf_value_type_id = def->value_type_id;
/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
- if (map->def.type == BPF_MAP_TYPE_RINGBUF)
+ if (map_is_ringbuf(map))
map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
if (def->parts & MAP_DEF_MAP_TYPE)
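With map_is_ringbuf(), the max_entries auto-adjustment now covers BPF_MAP_TYPE_USER_RINGBUF as well as BPF_MAP_TYPE_RINGBUF: the kernel insists on a page-aligned, power-of-2 ring size, so libbpf rounds up whatever convenient value the user asked for. A sketch of what that means in practice (as the bpf_map__set_max_entries hunk later in this diff shows, the same rounding applies there):

    #include <bpf/libbpf.h>

    /* request ~100 KB; libbpf rounds max_entries up to a valid
     * page-aligned power-of-2 size before creating the map */
    static int size_ringbuf(struct bpf_map *map)
    {
            return bpf_map__set_max_entries(map, 100 * 1024);
    }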
@@ -3359,7 +3387,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (err)
return err;
} else if (strcmp(name, "maps") == 0) {
- obj->efile.maps_shndx = idx;
+ pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
+ return -ENOTSUP;
} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
obj->efile.btf_maps_shndx = idx;
} else if (strcmp(name, BTF_ELF_SEC) == 0) {
@@ -3891,8 +3920,7 @@ static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
int shndx)
{
- return shndx == obj->efile.maps_shndx ||
- shndx == obj->efile.btf_maps_shndx;
+ return shndx == obj->efile.btf_maps_shndx;
}
static enum libbpf_map_type
@@ -4277,11 +4305,12 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
- struct bpf_map_info info = {};
+ struct bpf_map_info info;
__u32 len = sizeof(info), name_len;
int new_fd, err;
char *new_name;
+ memset(&info, 0, len);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err && errno == EINVAL)
err = bpf_get_map_info_from_fdinfo(fd, &info);
@@ -4358,7 +4387,7 @@ int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
map->def.max_entries = max_entries;
/* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
- if (map->def.type == BPF_MAP_TYPE_RINGBUF)
+ if (map_is_ringbuf(map))
map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
return 0;
@@ -4408,14 +4437,23 @@ static int probe_fd(int fd)
static int probe_kern_prog_name(void)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- int ret, insn_cnt = ARRAY_SIZE(insns);
+ union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.license = ptr_to_u64("GPL");
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
+ libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
/* make sure loading with name works */
- ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL);
+ ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
return probe_fd(ret);
}
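The probe now issues the raw BPF_PROG_LOAD command with a bpf_attr deliberately sized to offsetofend(union bpf_attr, prog_name), so bytes beyond the fields it actually uses can never confuse an older kernel. Applications wanting similar feature detection can stay on the public probe API; a sketch:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
            /* 1 = supported, 0 = not supported, <0 = error */
            int ret = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_SOCKET_FILTER, NULL);

            printf("socket_filter progs %ssupported\n", ret == 1 ? "" : "not ");
            return 0;
    }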
@@ -4430,7 +4468,7 @@ static int probe_kern_global_data(void)
};
int ret, map, insn_cnt = ARRAY_SIZE(insns);
- map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
if (map < 0) {
ret = -errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4563,7 +4601,7 @@ static int probe_kern_array_mmap(void)
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
int fd;
- fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts);
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
return probe_fd(fd);
}
@@ -4610,7 +4648,7 @@ static int probe_prog_bind_map(void)
};
int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
- map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL);
if (map < 0) {
ret = -errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4814,13 +4852,12 @@ bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
- struct bpf_map_info map_info = {};
+ struct bpf_map_info map_info;
char msg[STRERR_BUFSIZE];
- __u32 map_info_len;
+ __u32 map_info_len = sizeof(map_info);
int err;
- map_info_len = sizeof(map_info);
-
+ memset(&map_info, 0, map_info_len);
err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
if (err && errno == EINVAL)
err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
@@ -7244,8 +7281,6 @@ static int bpf_object_unload(struct bpf_object *obj)
return 0;
}
-int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload")));
-
static int bpf_object__sanitize_maps(struct bpf_object *obj)
{
struct bpf_map *m;
@@ -7944,17 +7979,9 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
continue;
if (path) {
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0) {
- err = -EINVAL;
- goto err_unpin_maps;
- } else if (len >= PATH_MAX) {
- err = -ENAMETOOLONG;
+ err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
+ if (err)
goto err_unpin_maps;
- }
sanitize_pin_path(buf);
pin_path = buf;
} else if (!map->pin_path) {
@@ -7992,14 +8019,9 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
char buf[PATH_MAX];
if (path) {
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0)
- return libbpf_err(-EINVAL);
- else if (len >= PATH_MAX)
- return libbpf_err(-ENAMETOOLONG);
+ err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
+ if (err)
+ return libbpf_err(err);
sanitize_pin_path(buf);
pin_path = buf;
} else if (!map->pin_path) {
@@ -8017,6 +8039,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
struct bpf_program *prog;
+ char buf[PATH_MAX];
int err;
if (!obj)
@@ -8028,17 +8051,9 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
}
bpf_object__for_each_program(prog, obj) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->name);
- if (len < 0) {
- err = -EINVAL;
- goto err_unpin_programs;
- } else if (len >= PATH_MAX) {
- err = -ENAMETOOLONG;
+ err = pathname_concat(buf, sizeof(buf), path, prog->name);
+ if (err)
goto err_unpin_programs;
- }
err = bpf_program__pin(prog, buf);
if (err)
@@ -8049,13 +8064,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
err_unpin_programs:
while ((prog = bpf_object__prev_program(obj, prog))) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->name);
- if (len < 0)
- continue;
- else if (len >= PATH_MAX)
+ if (pathname_concat(buf, sizeof(buf), path, prog->name))
continue;
bpf_program__unpin(prog, buf);
@@ -8074,13 +8083,10 @@ int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
bpf_object__for_each_program(prog, obj) {
char buf[PATH_MAX];
- int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->name);
- if (len < 0)
- return libbpf_err(-EINVAL);
- else if (len >= PATH_MAX)
- return libbpf_err(-ENAMETOOLONG);
+ err = pathname_concat(buf, sizeof(buf), path, prog->name);
+ if (err)
+ return libbpf_err(err);
err = bpf_program__unpin(prog, buf);
if (err)
@@ -8298,6 +8304,16 @@ int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
return 0;
}
+bool bpf_program__autoattach(const struct bpf_program *prog)
+{
+ return prog->autoattach;
+}
+
+void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
+{
+ prog->autoattach = autoattach;
+}
+
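This getter/setter pair works together with the bpf_object__attach_skeleton() hunk at the bottom of this diff: auto-attach now skips any program with autoattach disabled, so a program you attach manually (for example, to a non-default target) is not attached twice. A sketch, assuming a generated skeleton header named my_skel.skel.h with a program called 'handler' (both names hypothetical):

    #include "my_skel.skel.h" /* hypothetical bpftool-generated skeleton */

    static int attach_all_but_one(struct my_skel *skel)
    {
            /* leave 'handler' to be attached manually later */
            bpf_program__set_autoattach(skel->progs.handler, false);
            return my_skel__attach(skel); /* attaches the remaining autoload progs */
    }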
const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
{
return prog->insns;
@@ -8978,11 +8994,12 @@ int libbpf_find_vmlinux_btf_id(const char *name,
static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
- struct bpf_prog_info info = {};
+ struct bpf_prog_info info;
__u32 info_len = sizeof(info);
struct btf *btf;
int err;
+ memset(&info, 0, info_len);
err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
if (err) {
pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
@@ -9056,11 +9073,15 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
int err = 0;
/* BPF program's BTF ID */
- if (attach_prog_fd) {
+ if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
+ if (!attach_prog_fd) {
+ pr_warn("prog '%s': attach program FD is not set\n", prog->name);
+ return -EINVAL;
+ }
err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
if (err < 0) {
- pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
- attach_prog_fd, attach_name, err);
+ pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
+ prog->name, attach_prog_fd, attach_name, err);
return err;
}
*btf_obj_fd = 0;
@@ -9077,7 +9098,8 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
}
if (err) {
- pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
+ pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
+ prog->name, attach_name, err);
return err;
}
return 0;
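BPF_PROG_TYPE_EXT (freplace) programs replace a function inside another BPF program, so they always need a target program FD; with this change libbpf fails early with a per-program warning instead of silently falling through to a kernel BTF lookup. The FD is typically supplied via the public setter; a sketch, with "some_func" as a placeholder function name:

    #include <bpf/libbpf.h>

    /* point an EXT (freplace) program at the function it replaces;
     * resolves "some_func"'s BTF ID inside the target program */
    static int set_target(struct bpf_program *ext_prog, int target_prog_fd)
    {
            return bpf_program__set_attach_target(ext_prog, target_prog_fd, "some_func");
    }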
@@ -9810,13 +9832,16 @@ static int determine_uprobe_retprobe_bit(void)
static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
uint64_t offset, int pid, size_t ref_ctr_off)
{
- struct perf_event_attr attr = {};
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ struct perf_event_attr attr;
char errmsg[STRERR_BUFSIZE];
int type, pfd;
if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
return -EINVAL;
+ memset(&attr, 0, attr_sz);
+
type = uprobe ? determine_uprobe_perf_type()
: determine_kprobe_perf_type();
if (type < 0) {
@@ -9837,7 +9862,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
}
attr.config |= 1 << bit;
}
- attr.size = sizeof(attr);
+ attr.size = attr_sz;
attr.type = type;
attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
@@ -9879,7 +9904,7 @@ static bool use_debugfs(void)
static int has_debugfs = -1;
if (has_debugfs < 0)
- has_debugfs = access(DEBUGFS, F_OK) == 0;
+ has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
return has_debugfs == 1;
}
@@ -9936,7 +9961,8 @@ static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retpro
static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
const char *kfunc_name, size_t offset, int pid)
{
- struct perf_event_attr attr = {};
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ struct perf_event_attr attr;
char errmsg[STRERR_BUFSIZE];
int type, pfd, err;
@@ -9955,7 +9981,9 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
goto err_clean_legacy;
}
- attr.size = sizeof(attr);
+
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
attr.config = type;
attr.type = PERF_TYPE_TRACEPOINT;
@@ -10412,6 +10440,7 @@ static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retpro
static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
const char *binary_path, size_t offset, int pid)
{
+ const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_event_attr attr;
int type, pfd, err;
@@ -10429,8 +10458,8 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
goto err_clean_legacy;
}
- memset(&attr, 0, sizeof(attr));
- attr.size = sizeof(attr);
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
attr.config = type;
attr.type = PERF_TYPE_TRACEPOINT;
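These hunks repeatedly swap `struct perf_event_attr attr = {};` for an explicit memset() plus a stored attr_sz. As I read the change, memset() guarantees every byte is zeroed, padding included (which `= {}` does not strictly promise), and assigning attr.size from the same constant tells the kernel exactly which struct revision user space filled in. The pattern in isolation, as a sketch:

    #include <string.h>
    #include <linux/perf_event.h>

    static void init_perf_attr(struct perf_event_attr *attr)
    {
            const size_t attr_sz = sizeof(*attr);

            memset(attr, 0, attr_sz); /* zero all fields, padding included */
            attr->size = attr_sz;     /* report the filled-in struct size to the kernel */
    }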
@@ -10662,15 +10691,17 @@ static const char *arch_specific_lib_paths(void)
static int resolve_full_path(const char *file, char *result, size_t result_sz)
{
const char *search_paths[3] = {};
- int i;
+ int i, perm;
if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
search_paths[0] = getenv("LD_LIBRARY_PATH");
search_paths[1] = "/usr/lib64:/usr/lib";
search_paths[2] = arch_specific_lib_paths();
+ perm = R_OK;
} else {
search_paths[0] = getenv("PATH");
search_paths[1] = "/usr/bin:/usr/sbin";
+ perm = R_OK | X_OK;
}
for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
@@ -10689,8 +10720,8 @@ static int resolve_full_path(const char *file, char *result, size_t result_sz)
if (!seg_len)
continue;
snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
- /* ensure it is an executable file/link */
- if (access(result, R_OK | X_OK) < 0)
+ /* ensure it has required permissions */
+ if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
continue;
pr_debug("resolved '%s' to '%s'\n", file, result);
return 0;
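The access() -> faccessat(..., AT_EACCESS) substitution here and elsewhere in this patch fixes a subtle mismatch: access() checks permissions against the *real* UID/GID, while the open() that follows uses the *effective* IDs, so the two can disagree in setuid binaries. AT_EACCESS makes the check match the later open(); the same substitution works generally:

    #include <fcntl.h>
    #include <unistd.h>

    /* does the *effective* user have read access, matching a later open()? */
    static int readable(const char *path)
    {
            return faccessat(AT_FDCWD, path, R_OK, AT_EACCESS) == 0;
    }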
@@ -10967,7 +10998,8 @@ static int determine_tracepoint_id(const char *tp_category,
static int perf_event_open_tracepoint(const char *tp_category,
const char *tp_name)
{
- struct perf_event_attr attr = {};
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ struct perf_event_attr attr;
char errmsg[STRERR_BUFSIZE];
int tp_id, pfd, err;
@@ -10979,8 +11011,9 @@ static int perf_event_open_tracepoint(const char *tp_category,
return tp_id;
}
+ memset(&attr, 0, attr_sz);
attr.type = PERF_TYPE_TRACEPOINT;
- attr.size = sizeof(attr);
+ attr.size = attr_sz;
attr.config = tp_id;
pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
@@ -11600,12 +11633,15 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
void *ctx,
const struct perf_buffer_opts *opts)
{
+ const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_buffer_params p = {};
- struct perf_event_attr attr = {};
+ struct perf_event_attr attr;
if (!OPTS_VALID(opts, perf_buffer_opts))
return libbpf_err_ptr(-EINVAL);
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
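Beyond the memset, perf_buffer__new() now also sets attr.size explicitly, which the old `= {}` version left at zero. For reference, a sketch of consuming such a buffer with the libbpf 1.0 signature (map_fd is assumed to refer to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map):

    #include <stdio.h>
    #include <bpf/libbpf.h>

    static void on_sample(void *ctx, int cpu, void *data, __u32 size)
    {
            printf("cpu %d: %u bytes\n", cpu, size);
    }

    static int poll_events(int map_fd)
    {
            struct perf_buffer *pb = perf_buffer__new(map_fd, 8 /* pages per CPU */,
                                                      on_sample, NULL /* lost_cb */,
                                                      NULL /* ctx */, NULL /* opts */);
            if (!pb)
                    return -1;
            while (perf_buffer__poll(pb, 100 /* ms */) >= 0)
                    ;
            perf_buffer__free(pb);
            return 0;
    }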
@@ -12328,7 +12364,7 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
- if (!prog->autoload)
+ if (!prog->autoload || !prog->autoattach)
continue;
/* auto-attaching not supported for this program */