author    David S. Miller <davem@davemloft.net>  2019-02-01 15:00:55 -0800
committer David S. Miller <davem@davemloft.net>  2019-02-01 15:00:55 -0800
commit 665cf634e6d56b6864ec0e6d6c15fd5e6b8ad7e2
tree   3ab538712f5fafd8bd71afa89e459907140aa383 /net/tls
parent d3a5fd3c987c5e341bf78b79ef4d81080081b7d2
parent 8debd67e79daf655820557c51222004d0c43af43
Merge branch 'tls-1.3-support'
Dave Watson says:

====================
net: tls: TLS 1.3 support

This patchset adds 256bit keys and TLS 1.3 support to the kernel TLS socket.

TLS 1.3 is requested by passing TLS_1_3_VERSION in the setsockopt call, which changes the framing as required for TLS 1.3. 256bit keys are requested by passing TLS_CIPHER_AES_GCM_256 in the sockopt. This is a fairly straightforward passthrough to the crypto framework. 256bit keys work with both TLS 1.2 and TLS 1.3.

TLS 1.3 requires a different AAD layout, necessitating some minor refactoring. It also moves the message type byte to the encrypted portion of the message, instead of the cleartext header as it was in TLS 1.2. This requires moving the control message handling to after decryption, but is otherwise similar.

V1 -> V2: the first two patches were dropped and sent separately, one as a bugfix to the net tree.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
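For reference, a minimal userspace sketch of the API surface this series extends. The function name, the connected socket fd, and the key/iv/salt/rec_seq buffers are this sketch's own assumptions; real values come from a completed TLS handshake, and the TCP_ULP/SOL_TLS definitions are assumed to come from recent uapi headers:

#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <linux/tls.h>

static int enable_ktls_tx_tls13(int fd, const unsigned char *key,
				const unsigned char *iv,
				const unsigned char *salt,
				const unsigned char *rec_seq)
{
	struct tls12_crypto_info_aes_gcm_256 ci;

	/* Attach the TLS upper-layer protocol to the TCP socket. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_3_VERSION;            /* added by this series */
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_256; /* added by this series */
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_256_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_256_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_256_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);

	/* Program the kernel's TX crypto state. */
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}

The same struct with TLS_1_2_VERSION, or struct tls12_crypto_info_aes_gcm_128, selects the other version/cipher combinations the series supports.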
Diffstat (limited to 'net/tls')
 net/tls/tls_device.c          |   5
 net/tls/tls_device_fallback.c |   3
 net/tls/tls_main.c            |  36
 net/tls/tls_sw.c              | 244
 4 files changed, 207 insertions(+), 81 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index d753e362d2d9..7ee9008b2187 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -257,7 +257,8 @@ static int tls_push_record(struct sock *sk,
tls_fill_prepend(ctx,
skb_frag_address(frag),
record->len - ctx->tx.prepend_size,
- record_type);
+ record_type,
+ ctx->crypto_send.info.version);
/* HW doesn't care about the data in the tag, because it fills it. */
dummy_tag_frag.page = skb_frag_page(frag);
@@ -270,7 +271,7 @@ static int tls_push_record(struct sock *sk,
spin_unlock_irq(&offload_ctx->lock);
offload_ctx->open_record = NULL;
set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
- tls_advance_record_sn(sk, &ctx->tx);
+ tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);
for (i = 0; i < record->num_frags; i++) {
frag = &record->frags[i];
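The version now threaded into tls_fill_prepend() and tls_advance_record_sn() above matters because TLS 1.3 drops the explicit per-record IV from the wire and instead XORs the 8-byte record sequence number into the static IV (RFC 8446, section 5.3). A standalone sketch of that derivation, assuming the usual 12-byte AES-GCM nonce; this illustrates the idea behind the xor_iv_with_seq() calls in tls_sw.c below, not the patch's code itself:

#include <stdint.h>
#include <string.h>

/* TLS 1.3 per-record nonce: left-pad the 64-bit record sequence
 * number to 12 bytes and XOR it into the static write IV. */
static void tls13_nonce(uint8_t nonce[12], const uint8_t iv[12],
			uint64_t seq)
{
	int i;

	memcpy(nonce, iv, 12);
	for (i = 0; i < 8; i++)
		nonce[11 - i] ^= (uint8_t)(seq >> (8 * i));
}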
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 450a6dbc5a88..54c3a758f2a7 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -73,7 +73,8 @@ static int tls_enc_record(struct aead_request *aead_req,
len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;
tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
- (char *)&rcd_sn, sizeof(rcd_sn), buf[0]);
+ (char *)&rcd_sn, sizeof(rcd_sn), buf[0],
+ TLS_1_2_VERSION);
memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
TLS_CIPHER_AES_GCM_128_IV_SIZE);
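tls_make_aad() now takes the version because the two protocol versions authenticate different additional data: TLS 1.2 uses a 13-byte pseudo-header, while TLS 1.3 authenticates the 5-byte record header itself, which is why tls_set_sw_offload() below sets aad_size to TLS_AAD_SPACE_SIZE for 1.2 and TLS_HEADER_SIZE for 1.3. Illustrative layouts only; these structs are this sketch's own, not types from the patch:

#include <stdint.h>

/* TLS 1.2 AEAD additional data: 13 bytes (TLS_AAD_SPACE_SIZE). */
struct tls12_aad_sketch {
	uint8_t seq[8];        /* implicit record sequence number */
	uint8_t content_type;  /* e.g. 23 = application_data */
	uint8_t version[2];    /* 0x03 0x03 */
	uint8_t length[2];     /* plaintext length, big-endian */
} __attribute__((packed));

/* TLS 1.3 AEAD additional data: the record header (TLS_HEADER_SIZE). */
struct tls13_aad_sketch {
	uint8_t opaque_type;        /* always 23; real type is encrypted */
	uint8_t legacy_version[2];  /* always 0x03 0x03 */
	uint8_t length[2];          /* ciphertext length incl. tag */
} __attribute__((packed));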
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index d36d095cbcf0..d1c2fd9a3f63 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -372,6 +372,30 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
rc = -EFAULT;
break;
}
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *
+ crypto_info_aes_gcm_256 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_gcm_256,
+ info);
+
+ if (len != sizeof(*crypto_info_aes_gcm_256)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(crypto_info_aes_gcm_256->iv,
+ ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
+ TLS_CIPHER_AES_GCM_256_IV_SIZE);
+ memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
+ TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval,
+ crypto_info_aes_gcm_256,
+ sizeof(*crypto_info_aes_gcm_256)))
+ rc = -EFAULT;
+ break;
+ }
default:
rc = -EINVAL;
}
@@ -412,6 +436,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
{
struct tls_crypto_info *crypto_info;
struct tls_context *ctx = tls_get_ctx(sk);
+ size_t optsize;
int rc = 0;
int conf;
@@ -438,14 +463,19 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
}
/* check version */
- if (crypto_info->version != TLS_1_2_VERSION) {
+ if (crypto_info->version != TLS_1_2_VERSION &&
+ crypto_info->version != TLS_1_3_VERSION) {
rc = -ENOTSUPP;
goto err_crypto_info;
}
switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
+ case TLS_CIPHER_AES_GCM_128:
+ case TLS_CIPHER_AES_GCM_256: {
+ optsize = crypto_info->cipher_type == TLS_CIPHER_AES_GCM_128 ?
+ sizeof(struct tls12_crypto_info_aes_gcm_128) :
+ sizeof(struct tls12_crypto_info_aes_gcm_256);
+ if (optlen != optsize) {
rc = -EINVAL;
goto err_crypto_info;
}
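The AES_GCM_256 branch added to do_tls_getsockopt_tx() above gives userspace a way to read back the kernel's live TX record state for 256-bit keys. A hedged sketch of that read-back; the function name is this sketch's own and fd is assumed to already have TLS_TX configured:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/tls.h>

static int dump_tx_rec_seq(int fd)
{
	struct tls12_crypto_info_aes_gcm_256 ci;
	socklen_t len = sizeof(ci);

	/* The kernel fills iv and rec_seq from its current TX state;
	 * passing the exact struct size selects the AES_GCM_256 path. */
	if (getsockopt(fd, SOL_TLS, TLS_TX, &ci, &len))
		return -1;

	printf("rec_seq[0]=%02x rec_seq[%d]=%02x\n", ci.rec_seq[0],
	       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE - 1,
	       ci.rec_seq[TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE - 1]);
	return 0;
}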
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 3f2a6af27e62..06d7ae97b929 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -120,6 +120,34 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
return __skb_nsg(skb, offset, len, 0);
}
+static int padding_length(struct tls_sw_context_rx *ctx,
+ struct tls_context *tls_ctx, struct sk_buff *skb)
+{
+ struct strp_msg *rxm = strp_msg(skb);
+ int sub = 0;
+
+ /* Determine zero-padding length */
+ if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION) {
+ char content_type = 0;
+ int err;
+ int back = 17;
+
+ while (content_type == 0) {
+ if (back > rxm->full_len)
+ return -EBADMSG;
+ err = skb_copy_bits(skb,
+ rxm->offset + rxm->full_len - back,
+ &content_type, 1);
+ if (content_type)
+ break;
+ sub++;
+ back++;
+ }
+ ctx->control = content_type;
+ }
+ return sub;
+}
+
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
struct aead_request *aead_req = (struct aead_request *)req;
@@ -142,7 +170,7 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
tls_err_abort(skb->sk, err);
} else {
struct strp_msg *rxm = strp_msg(skb);
-
+ rxm->full_len -= padding_length(ctx, tls_ctx, skb);
rxm->offset += tls_ctx->rx.prepend_size;
rxm->full_len -= tls_ctx->rx.overhead_size;
}
@@ -185,7 +213,7 @@ static int tls_do_decryption(struct sock *sk,
int ret;
aead_request_set_tfm(aead_req, ctx->aead_recv);
- aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
+ aead_request_set_ad(aead_req, tls_ctx->rx.aad_size);
aead_request_set_crypt(aead_req, sgin, sgout,
data_len + tls_ctx->rx.tag_size,
(u8 *)iv_recv);
@@ -289,12 +317,12 @@ static struct tls_rec *tls_get_rec(struct sock *sk)
sg_init_table(rec->sg_aead_in, 2);
sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
- sizeof(rec->aad_space));
+ tls_ctx->tx.aad_size);
sg_unmark_end(&rec->sg_aead_in[1]);
sg_init_table(rec->sg_aead_out, 2);
sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
- sizeof(rec->aad_space));
+ tls_ctx->tx.aad_size);
sg_unmark_end(&rec->sg_aead_out[1]);
return rec;
@@ -448,6 +476,8 @@ static int tls_do_encryption(struct sock *sk,
int rc;
memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
+ xor_iv_with_seq(tls_ctx->crypto_send.info.version, rec->iv_data,
+ tls_ctx->tx.rec_seq);
sge->offset += tls_ctx->tx.prepend_size;
sge->length -= tls_ctx->tx.prepend_size;
@@ -455,7 +485,7 @@ static int tls_do_encryption(struct sock *sk,
msg_en->sg.curr = start;
aead_request_set_tfm(aead_req, ctx->aead_send);
- aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
+ aead_request_set_ad(aead_req, tls_ctx->tx.aad_size);
aead_request_set_crypt(aead_req, rec->sg_aead_in,
rec->sg_aead_out,
data_len, rec->iv_data);
@@ -483,7 +513,8 @@ static int tls_do_encryption(struct sock *sk,
/* Unhook the record from context if encryption is not failure */
ctx->open_rec = NULL;
- tls_advance_record_sn(sk, &tls_ctx->tx);
+ tls_advance_record_sn(sk, &tls_ctx->tx,
+ tls_ctx->crypto_send.info.version);
return rc;
}
@@ -640,7 +671,17 @@ static int tls_push_record(struct sock *sk, int flags,
i = msg_pl->sg.end;
sk_msg_iter_var_prev(i);
- sg_mark_end(sk_msg_elem(msg_pl, i));
+
+ rec->content_type = record_type;
+ if (tls_ctx->crypto_send.info.version == TLS_1_3_VERSION) {
+ /* Add content type to end of message. No padding added */
+ sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
+ sg_mark_end(&rec->sg_content_type);
+ sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
+ &rec->sg_content_type);
+ } else {
+ sg_mark_end(sk_msg_elem(msg_pl, i));
+ }
i = msg_pl->sg.start;
sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
@@ -653,18 +694,22 @@ static int tls_push_record(struct sock *sk, int flags,
i = msg_en->sg.start;
sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
- tls_make_aad(rec->aad_space, msg_pl->sg.size,
+ tls_make_aad(rec->aad_space, msg_pl->sg.size + tls_ctx->tx.tail_size,
tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
- record_type);
+ record_type,
+ tls_ctx->crypto_send.info.version);
tls_fill_prepend(tls_ctx,
page_address(sg_page(&msg_en->sg.data[i])) +
- msg_en->sg.data[i].offset, msg_pl->sg.size,
- record_type);
+ msg_en->sg.data[i].offset,
+ msg_pl->sg.size + tls_ctx->tx.tail_size,
+ record_type,
+ tls_ctx->crypto_send.info.version);
tls_ctx->pending_open_record_frags = false;
- rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i);
+ rc = tls_do_encryption(sk, tls_ctx, ctx, req,
+ msg_pl->sg.size + tls_ctx->tx.tail_size, i);
if (rc < 0) {
if (rc != -EINPROGRESS) {
tls_err_abort(sk, EBADMSG);
@@ -1292,7 +1337,8 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
u8 *aad, *iv, *mem = NULL;
struct scatterlist *sgin = NULL;
struct scatterlist *sgout = NULL;
- const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;
+ const int data_len = rxm->full_len - tls_ctx->rx.overhead_size +
+ tls_ctx->rx.tail_size;
if (*zc && (out_iov || out_sg)) {
if (out_iov)
@@ -1317,7 +1363,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
mem_size = aead_size + (nsg * sizeof(struct scatterlist));
- mem_size = mem_size + TLS_AAD_SPACE_SIZE;
+ mem_size = mem_size + tls_ctx->rx.aad_size;
mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
/* Allocate a single block of memory which contains
@@ -1333,7 +1379,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
sgin = (struct scatterlist *)(mem + aead_size);
sgout = sgin + n_sgin;
aad = (u8 *)(sgout + n_sgout);
- iv = aad + TLS_AAD_SPACE_SIZE;
+ iv = aad + tls_ctx->rx.aad_size;
/* Prepare IV */
err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
@@ -1343,16 +1389,24 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
kfree(mem);
return err;
}
- memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ if (tls_ctx->crypto_recv.info.version == TLS_1_3_VERSION)
+ memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv));
+ else
+ memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+
+ xor_iv_with_seq(tls_ctx->crypto_recv.info.version, iv,
+ tls_ctx->rx.rec_seq);
/* Prepare AAD */
- tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
+ tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size +
+ tls_ctx->rx.tail_size,
tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
- ctx->control);
+ ctx->control,
+ tls_ctx->crypto_recv.info.version);
/* Prepare sgin */
sg_init_table(sgin, n_sgin);
- sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
+ sg_set_buf(&sgin[0], aad, tls_ctx->rx.aad_size);
err = skb_to_sgvec(skb, &sgin[1],
rxm->offset + tls_ctx->rx.prepend_size,
rxm->full_len - tls_ctx->rx.prepend_size);
@@ -1364,7 +1418,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
if (n_sgout) {
if (out_iov) {
sg_init_table(sgout, n_sgout);
- sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
+ sg_set_buf(&sgout[0], aad, tls_ctx->rx.aad_size);
*chunk = 0;
err = tls_setup_from_iter(sk, out_iov, data_len,
@@ -1405,6 +1459,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ int version = tls_ctx->crypto_recv.info.version;
struct strp_msg *rxm = strp_msg(skb);
int err = 0;
@@ -1417,20 +1472,23 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async);
if (err < 0) {
if (err == -EINPROGRESS)
- tls_advance_record_sn(sk, &tls_ctx->rx);
+ tls_advance_record_sn(sk, &tls_ctx->rx,
+ version);
return err;
}
+
+ rxm->full_len -= padding_length(ctx, tls_ctx, skb);
+
+ rxm->offset += tls_ctx->rx.prepend_size;
+ rxm->full_len -= tls_ctx->rx.overhead_size;
+ tls_advance_record_sn(sk, &tls_ctx->rx, version);
+ ctx->decrypted = true;
+ ctx->saved_data_ready(sk);
} else {
*zc = false;
}
- rxm->offset += tls_ctx->rx.prepend_size;
- rxm->full_len -= tls_ctx->rx.overhead_size;
- tls_advance_record_sn(sk, &tls_ctx->rx);
- ctx->decrypted = true;
- ctx->saved_data_ready(sk);
-
return err;
}
@@ -1609,6 +1667,26 @@ int tls_sw_recvmsg(struct sock *sk,
rxm = strp_msg(skb);
+ to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size;
+
+ if (to_decrypt <= len && !is_kvec && !is_peek &&
+ ctx->control == TLS_RECORD_TYPE_DATA &&
+ tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION)
+ zc = true;
+
+ err = decrypt_skb_update(sk, skb, &msg->msg_iter,
+ &chunk, &zc, ctx->async_capable);
+ if (err < 0 && err != -EINPROGRESS) {
+ tls_err_abort(sk, EBADMSG);
+ goto recv_end;
+ }
+
+ if (err == -EINPROGRESS) {
+ async = true;
+ num_async++;
+ goto pick_next_record;
+ }
+
if (!cmsg) {
int cerr;
@@ -1626,40 +1704,22 @@ int tls_sw_recvmsg(struct sock *sk,
goto recv_end;
}
- to_decrypt = rxm->full_len - tls_ctx->rx.overhead_size;
-
- if (to_decrypt <= len && !is_kvec && !is_peek)
- zc = true;
-
- err = decrypt_skb_update(sk, skb, &msg->msg_iter,
- &chunk, &zc, ctx->async_capable);
- if (err < 0 && err != -EINPROGRESS) {
- tls_err_abort(sk, EBADMSG);
- goto recv_end;
- }
-
- if (err == -EINPROGRESS) {
- async = true;
- num_async++;
- goto pick_next_record;
- } else {
- if (!zc) {
- if (rxm->full_len > len) {
- retain_skb = true;
- chunk = len;
- } else {
- chunk = rxm->full_len;
- }
+ if (!zc) {
+ if (rxm->full_len > len) {
+ retain_skb = true;
+ chunk = len;
+ } else {
+ chunk = rxm->full_len;
+ }
- err = skb_copy_datagram_msg(skb, rxm->offset,
- msg, chunk);
- if (err < 0)
- goto recv_end;
+ err = skb_copy_datagram_msg(skb, rxm->offset,
+ msg, chunk);
+ if (err < 0)
+ goto recv_end;
- if (!is_peek) {
- rxm->offset = rxm->offset + chunk;
- rxm->full_len = rxm->full_len - chunk;
- }
+ if (!is_peek) {
+ rxm->offset = rxm->offset + chunk;
+ rxm->full_len = rxm->full_len - chunk;
}
}
@@ -1759,15 +1819,15 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
if (!skb)
goto splice_read_end;
- /* splice does not support reading control messages */
- if (ctx->control != TLS_RECORD_TYPE_DATA) {
- err = -ENOTSUPP;
- goto splice_read_end;
- }
-
if (!ctx->decrypted) {
err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
+ /* splice does not support reading control messages */
+ if (ctx->control != TLS_RECORD_TYPE_DATA) {
+ err = -ENOTSUPP;
+ goto splice_read_end;
+ }
+
if (err < 0) {
tls_err_abort(sk, EBADMSG);
goto splice_read_end;
@@ -1835,9 +1895,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
data_len = ((header[4] & 0xFF) | (header[3] << 8));
- cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;
+ cipher_overhead = tls_ctx->rx.tag_size;
+ if (tls_ctx->crypto_recv.info.version != TLS_1_3_VERSION)
+ cipher_overhead += tls_ctx->rx.iv_size;
- if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
+ if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
+ tls_ctx->rx.tail_size) {
ret = -EMSGSIZE;
goto read_failure;
}
@@ -1846,12 +1909,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure;
}
- if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
- header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
+ /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
+ if (header[1] != TLS_1_2_VERSION_MINOR ||
+ header[2] != TLS_1_2_VERSION_MAJOR) {
ret = -EINVAL;
goto read_failure;
}
-
#ifdef CONFIG_TLS_DEVICE
handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
*(u64*)tls_ctx->rx.rec_seq);
@@ -1999,6 +2062,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
struct tls_crypto_info *crypto_info;
struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
+ struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
struct tls_sw_context_tx *sw_ctx_tx = NULL;
struct tls_sw_context_rx *sw_ctx_rx = NULL;
struct cipher_context *cctx;
@@ -2006,7 +2070,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
struct strp_callbacks cb;
u16 nonce_size, tag_size, iv_size, rec_seq_size;
struct crypto_tfm *tfm;
- char *iv, *rec_seq;
+ char *iv, *rec_seq, *key, *salt;
+ size_t keysize;
int rc = 0;
if (!ctx) {
@@ -2067,6 +2132,24 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
gcm_128_info =
(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+ key = gcm_128_info->key;
+ salt = gcm_128_info->salt;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
+ tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
+ iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
+ iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
+ rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
+ rec_seq =
+ ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
+ gcm_256_info =
+ (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ key = gcm_256_info->key;
+ salt = gcm_256_info->salt;
break;
}
default:
@@ -2080,9 +2163,19 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_priv;
}
+ if (crypto_info->version == TLS_1_3_VERSION) {
+ nonce_size = 0;
+ cctx->aad_size = TLS_HEADER_SIZE;
+ cctx->tail_size = 1;
+ } else {
+ cctx->aad_size = TLS_AAD_SPACE_SIZE;
+ cctx->tail_size = 0;
+ }
+
cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
cctx->tag_size = tag_size;
- cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
+ cctx->overhead_size = cctx->prepend_size + cctx->tag_size +
+ cctx->tail_size;
cctx->iv_size = iv_size;
cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
GFP_KERNEL);
@@ -2090,7 +2183,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
rc = -ENOMEM;
goto free_priv;
}
- memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ /* Note: 128 & 256 bit salt are the same size */
+ memcpy(cctx->iv, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
cctx->rec_seq_size = rec_seq_size;
cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
@@ -2110,8 +2204,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
ctx->push_pending_record = tls_sw_push_pending_record;
- rc = crypto_aead_setkey(*aead, gcm_128_info->key,
- TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ rc = crypto_aead_setkey(*aead, key, keysize);
+
if (rc)
goto free_aead;
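A closing note on the receive-side change: padding_length() in tls_sw.c scans backwards because TLS 1.3 encrypts TLSInnerPlaintext = content || content_type(1) || zero padding, so the real content type is the last non-zero byte of the decrypted record (the in-kernel walk starts at back = 17 to skip the 16-byte AEAD tag still present in the skb). A standalone sketch of the same scan over a plain buffer; the name and signature are this sketch's own:

#include <stddef.h>

/* Given a decrypted TLS 1.3 inner plaintext (tag already stripped),
 * report the real content type and how many trailing zero-padding
 * bytes follow it. Returns the padding length, or -1 for an all-zero
 * record, which is a protocol error (EBADMSG in the patch). */
static int tls13_strip_padding(const unsigned char *pt, size_t len,
			       unsigned char *content_type)
{
	size_t i = len;

	while (i > 0) {
		if (pt[i - 1] != 0) {
			*content_type = pt[i - 1];
			return (int)(len - i); /* zeros after the type byte */
		}
		i--;
	}
	return -1;
}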