Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--	kernel/trace/bpf_trace.c	169
1 file changed, 85 insertions, 84 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3bbd3f0c810c..e8da032bb6fc 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -369,8 +369,6 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 	return &bpf_probe_write_user_proto;
 }
 
-static DEFINE_RAW_SPINLOCK(trace_printk_lock);
-
 #define MAX_TRACE_PRINTK_VARARGS	3
 #define BPF_TRACE_PRINTK_SIZE		1024
 
@@ -378,23 +376,22 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 	   u64, arg2, u64, arg3)
 {
 	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
-	u32 *bin_args;
-	static char buf[BPF_TRACE_PRINTK_SIZE];
-	unsigned long flags;
+	struct bpf_bprintf_data data = {
+		.get_bin_args	= true,
+		.get_buf	= true,
+	};
 	int ret;
 
-	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
-				  MAX_TRACE_PRINTK_VARARGS);
+	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
+				  MAX_TRACE_PRINTK_VARARGS, &data);
 	if (ret < 0)
 		return ret;
 
-	raw_spin_lock_irqsave(&trace_printk_lock, flags);
-	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
+	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 
-	trace_bpf_trace_printk(buf);
-	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+	trace_bpf_trace_printk(data.buf);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return ret;
 }
@@ -427,30 +424,29 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 	return &bpf_trace_printk_proto;
 }
 
-BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
+BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
 	   u32, data_len)
 {
-	static char buf[BPF_TRACE_PRINTK_SIZE];
-	unsigned long flags;
+	struct bpf_bprintf_data data = {
+		.get_bin_args	= true,
+		.get_buf	= true,
+	};
 	int ret, num_args;
-	u32 *bin_args;
 
 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
-	    (data_len && !data))
+	    (data_len && !args))
 		return -EINVAL;
 	num_args = data_len / 8;
 
-	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
+	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 	if (ret < 0)
 		return ret;
 
-	raw_spin_lock_irqsave(&trace_printk_lock, flags);
-	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
+	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 
-	trace_bpf_trace_printk(buf);
-	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+	trace_bpf_trace_printk(data.buf);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return ret;
 }
@@ -472,23 +468,25 @@ const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
 }
 
 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
-	   const void *, data, u32, data_len)
+	   const void *, args, u32, data_len)
 {
+	struct bpf_bprintf_data data = {
+		.get_bin_args	= true,
+	};
 	int err, num_args;
-	u32 *bin_args;
 
 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
-	    (data_len && !data))
+	    (data_len && !args))
 		return -EINVAL;
 	num_args = data_len / 8;
 
-	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
+	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 	if (err < 0)
 		return err;
 
-	seq_bprintf(m, fmt, bin_args);
+	seq_bprintf(m, fmt, data.bin_args);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 }
@@ -687,8 +685,7 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	}
 
 	perf_sample_data_init(sd, 0, 0);
-	sd->raw = &raw;
-	sd->sample_flags |= PERF_SAMPLE_RAW;
+	perf_sample_save_raw_data(sd, &raw);
 
 	err = __bpf_perf_event_output(regs, map, flags, sd);
 
@@ -746,8 +743,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 
 	perf_fetch_caller_regs(regs);
 	perf_sample_data_init(sd, 0, 0);
-	sd->raw = &raw;
-	sd->sample_flags |= PERF_SAMPLE_RAW;
+	perf_sample_save_raw_data(sd, &raw);
 
 	ret = __bpf_perf_event_output(regs, map, flags, sd);
 out:
@@ -833,6 +829,7 @@ static void do_bpf_send_signal(struct irq_work *entry)
 
 	work = container_of(entry, struct send_signal_irq_work, irq_work);
 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
+	put_task_struct(work->task);
 }
 
 static int bpf_send_signal_common(u32 sig, enum pid_type type)
@@ -848,6 +845,9 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 		return -EPERM;
 	if (unlikely(!nmi_uaccess_okay()))
 		return -EPERM;
+	/* Task should not be pid=1 to avoid kernel panic. */
+	if (unlikely(is_global_init(current)))
+		return -EPERM;
 
 	if (irqs_disabled()) {
 		/* Do an early check on signal validity. Otherwise,
@@ -864,7 +864,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 		 * to the irq_work. The current task may change when queued
 		 * irq works get executed.
 		 */
-		work->task = current;
+		work->task = get_task_struct(current);
 		work->sig = sig;
 		work->type = type;
 		irq_work_queue(&work->irq_work);
@@ -1235,7 +1235,7 @@ __diag_ignore_all("-Wmissing-prototypes",
  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
  *         NULL pointer otherwise.
  */
-struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
+__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
 {
 	key_ref_t key_ref;
 	struct bpf_key *bkey;
@@ -1284,7 +1284,7 @@ struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
  * Return: a bpf_key pointer with an invalid key pointer set from the
  *         pre-determined ID on success, a NULL pointer otherwise
  */
-struct bpf_key *bpf_lookup_system_key(u64 id)
+__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
 {
 	struct bpf_key *bkey;
 
@@ -1308,7 +1308,7 @@ struct bpf_key *bpf_lookup_system_key(u64 id)
  * Decrement the reference count of the key inside *bkey*, if the pointer
  * is valid, and free *bkey*.
  */
-void bpf_key_put(struct bpf_key *bkey)
+__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
 {
 	if (bkey->has_ref)
 		key_put(bkey->key);
@@ -1328,7 +1328,7 @@ void bpf_key_put(struct bpf_key *bkey)
 *
 * Return: 0 on success, a negative value on error.
 */
-int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
+__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
 			       struct bpf_dynptr_kern *sig_ptr,
 			       struct bpf_key *trusted_keyring)
 {
@@ -2684,69 +2684,77 @@ static void symbols_swap_r(void *a, void *b, int size, const void *priv)
 	}
 }
 
-struct module_addr_args {
-	unsigned long *addrs;
-	u32 addrs_cnt;
+struct modules_array {
 	struct module **mods;
 	int mods_cnt;
 	int mods_cap;
 };
 
-static int module_callback(void *data, const char *name,
-			   struct module *mod, unsigned long addr)
+static int add_module(struct modules_array *arr, struct module *mod)
 {
-	struct module_addr_args *args = data;
 	struct module **mods;
 
-	/* We iterate all modules symbols and for each we:
-	 * - search for it in provided addresses array
-	 * - if found we check if we already have the module pointer stored
-	 *   (we iterate modules sequentially, so we can check just the last
-	 *   module pointer)
-	 * - take module reference and store it
-	 */
-	if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
-		       bpf_kprobe_multi_addrs_cmp))
-		return 0;
-
-	if (args->mods && args->mods[args->mods_cnt - 1] == mod)
-		return 0;
-
-	if (args->mods_cnt == args->mods_cap) {
-		args->mods_cap = max(16, args->mods_cap * 3 / 2);
-		mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
+	if (arr->mods_cnt == arr->mods_cap) {
+		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
+		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
 		if (!mods)
 			return -ENOMEM;
-		args->mods = mods;
+		arr->mods = mods;
 	}
 
-	if (!try_module_get(mod))
-		return -EINVAL;
-
-	args->mods[args->mods_cnt] = mod;
-	args->mods_cnt++;
+	arr->mods[arr->mods_cnt] = mod;
+	arr->mods_cnt++;
 	return 0;
 }
 
+static bool has_module(struct modules_array *arr, struct module *mod)
+{
+	int i;
+
+	for (i = arr->mods_cnt - 1; i >= 0; i--) {
+		if (arr->mods[i] == mod)
+			return true;
+	}
+	return false;
+}
+
 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
 {
-	struct module_addr_args args = {
-		.addrs     = addrs,
-		.addrs_cnt = addrs_cnt,
-	};
-	int err;
+	struct modules_array arr = {};
+	u32 i, err = 0;
+
+	for (i = 0; i < addrs_cnt; i++) {
+		struct module *mod;
+
+		preempt_disable();
+		mod = __module_address(addrs[i]);
+		/* Either no module or it's already stored */
+		if (!mod || has_module(&arr, mod)) {
+			preempt_enable();
+			continue;
+		}
+		if (!try_module_get(mod))
+			err = -EINVAL;
+		preempt_enable();
+		if (err)
+			break;
+		err = add_module(&arr, mod);
+		if (err) {
+			module_put(mod);
+			break;
+		}
+	}
 
 	/* We return either err < 0 in case of error, ... */
-	err = module_kallsyms_on_each_symbol(module_callback, &args);
 	if (err) {
-		kprobe_multi_put_modules(args.mods, args.mods_cnt);
-		kfree(args.mods);
+		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
+		kfree(arr.mods);
 		return err;
 	}
 
 	/* or number of modules found if everything is ok. */
-	*mods = args.mods;
-	return args.mods_cnt;
+	*mods = arr.mods;
+	return arr.mods_cnt;
 }
 
 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
@@ -2859,13 +2867,6 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 		       bpf_kprobe_multi_cookie_cmp,
 		       bpf_kprobe_multi_cookie_swap,
 		       link);
-	} else {
-		/*
-		 * We need to sort addrs array even if there are no cookies
-		 * provided, to allow bsearch in get_modules_for_addrs.
-		 */
-		sort(addrs, cnt, sizeof(*addrs),
-		       bpf_kprobe_multi_addrs_cmp, NULL);
 	}
 
 	err = get_modules_for_addrs(&link->mods, addrs, cnt);
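The printk-style helpers above now share one calling convention: bpf_bprintf_prepare() hands out per-CPU resources through struct bpf_bprintf_data, instead of the callers serializing on the removed trace_printk_lock around a shared static buffer. A minimal sketch of that convention, assuming only the bpf_bprintf_data fields visible in the hunks above; the wrapper name example_bprintf is hypothetical:

static int example_bprintf(char *fmt, u32 fmt_size, u64 *args, int num_args)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,	/* decode varargs into data.bin_args */
		.get_buf	= true,	/* also hand out a per-CPU output buffer */
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (ret < 0)
		return ret;

	/* data.buf is owned by this context until cleanup, so no lock is
	 * needed around the format-and-trace step. */
	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
	trace_bpf_trace_printk(data.buf);

	/* Releases whatever prepare handed out (bin_args and/or buf). */
	bpf_bprintf_cleanup(&data);
	return ret;
}

bpf_seq_printf() follows the same shape but sets only .get_bin_args, since seq_bprintf() writes into the seq_file rather than into a raw buffer.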
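The send-signal hunks pair a reference acquired at queue time with a release in the irq_work handler, so the task cannot be freed between irq_work_queue() and the deferred group_send_sig_info() call. Reduced to the bare pattern (a sketch, not the full bpf_send_signal_common() logic):

	/* Queue side, IRQs disabled: pin the task before deferring. */
	work->task = get_task_struct(current);
	work->sig = sig;
	work->type = type;
	irq_work_queue(&work->irq_work);

	/* irq_work side: deliver, then drop the reference taken above. */
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
	put_task_struct(work->task);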
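get_modules_for_addrs() now resolves each address directly with __module_address() rather than scanning every module symbol, which is also why the else-branch sort of addrs is no longer needed. The per-address step, as a standalone sketch (the helper name module_for_addr is illustrative; __module_address() must be called with preemption disabled, and a non-NULL result must eventually be released with module_put()):

static struct module *module_for_addr(unsigned long addr)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);	/* NULL for core kernel text */
	if (mod && !try_module_get(mod))
		mod = NULL;		/* module is being unloaded */
	preempt_enable();

	return mod;			/* caller owns a reference if non-NULL */
}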
