author		Petr Machata <petrm@nvidia.com>		2023-03-21 12:51:59 +0100
committer	David S. Miller <davem@davemloft.net>	2023-03-23 08:32:52 +0000
commit		5c4a9aa856c706def9239d1e43c4ea9fccb5c75a
tree		3aad2809d41cd8a329b5a07315dad715a4622054	/net/ipv4/devinet.c
parent		90bf6610a133288f28941deae39ddb131ffbd3ed
net: ipv4: Allow changing IPv4 address protocol
When the IP address protocol field was added in commit 47f0bd503210 ("net: Add new protocol attribute to IP addresses"), the semantics included the ability to change the protocol of IPv6 addresses, but not of IPv4 addresses. This appears to have been an accident rather than a deliberate choice.

A userspace application that wants to change the protocol of an address could drop and recreate the address, but that disrupts routing and is impractical. So in this patch, when an IPv4 address is replaced (through an RTM_NEWADDR request with the NLM_F_REPLACE flag), update the proto field of the address to the one given in the request, or to zero if none is given. This matches the behavior of IPv6. Previously, any new value given was simply ignored.

Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/devinet.c')
-rw-r--r--	net/ipv4/devinet.c	3
1 file changed, 3 insertions, 0 deletions
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index b0acf6e19aed..5deac0517ef7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -962,6 +962,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 					 extack);
 	} else {
 		u32 new_metric = ifa->ifa_rt_priority;
+		u8 new_proto = ifa->ifa_proto;
 
 		inet_free_ifa(ifa);
 
@@ -975,6 +976,8 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 			ifa->ifa_rt_priority = new_metric;
 		}
 
+		ifa->ifa_proto = new_proto;
+
 		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 		cancel_delayed_work(&check_lifetime_work);
 		queue_delayed_work(system_power_efficient_wq,
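
The userspace side of this change can be exercised with a plain rtnetlink request. The following sketch is illustrative and not part of the commit: it issues RTM_NEWADDR with NLM_F_REPLACE and an IFA_PROTO attribute for an existing IPv4 address. The interface index, prefix, and protocol value are made-up examples; it assumes CAP_NET_ADMIN, uapi headers new enough to define IFA_PROTO (v5.18+), and a kernel carrying this patch for the replace to actually update the field.

/* Hedged sketch: replace an IPv4 address and set its protocol field.
 * All concrete values (ifindex, 192.0.2.1/24, proto 0x42) are examples.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>

#ifndef IFA_PROTO
#define IFA_PROTO 11	/* defined in uapi linux/if_addr.h since v5.18 */
#endif

/* Append one rtattr to a netlink message (standard pattern). */
static void add_attr(struct nlmsghdr *nlh, int type, const void *data, int len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ifaddrmsg ifa;
		char attrs[64];
	} req;
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct in_addr addr;
	__u8 proto = 0x42;	/* arbitrary userspace value; 0 clears it */
	int fd;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
	req.nlh.nlmsg_type = RTM_NEWADDR;
	/* NLM_F_REPLACE selects the code path touched by this patch. */
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE;

	req.ifa.ifa_family = AF_INET;
	req.ifa.ifa_prefixlen = 24;
	req.ifa.ifa_index = 2;	/* hypothetical ifindex; see if_nametoindex(3) */

	inet_pton(AF_INET, "192.0.2.1", &addr);
	add_attr(&req.nlh, IFA_LOCAL, &addr, sizeof(addr));
	add_attr(&req.nlh, IFA_PROTO, &proto, sizeof(proto));

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 ||
	    sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("rtnetlink");
		return 1;
	}
	close(fd);
	return 0;
}

Before this patch, such a request against an existing IPv4 address left ifa_proto untouched; with it, the replace adopts the supplied value, or zeroes the field when IFA_PROTO is omitted. Recent iproute2 releases also expose this attribute through the proto keyword of ip address.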