cipher.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114
  1. /*
  2. *
  3. * Embedded Linux library
  4. *
  5. * Copyright (C) 2015 Intel Corporation. All rights reserved.
  6. *
  7. * This library is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * This library is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with this library; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  20. *
  21. */
  22. #ifdef HAVE_CONFIG_H
  23. #include <config.h>
  24. #endif
  25. #define _GNU_SOURCE
  26. #include <unistd.h>
  27. #include <stdbool.h>
  28. #include <stdint.h>
  29. #include <errno.h>
  30. #include <sys/socket.h>
  31. #include <alloca.h>
  32. #include "useful.h"
  33. #include "cipher.h"
  34. #include "private.h"
  35. #include "random.h"
  36. #include "missing.h"
  37. #ifndef HAVE_LINUX_IF_ALG_H
  38. #ifndef HAVE_LINUX_TYPES_H
  39. typedef uint8_t __u8;
  40. typedef uint16_t __u16;
  41. typedef uint32_t __u32;
  42. #else
  43. #include <linux/types.h>
  44. #endif
  45. #ifndef AF_ALG
  46. #define AF_ALG 38
  47. #define PF_ALG AF_ALG
  48. #endif
  49. struct sockaddr_alg {
  50. __u16 salg_family;
  51. __u8 salg_type[14];
  52. __u32 salg_feat;
  53. __u32 salg_mask;
  54. __u8 salg_name[64];
  55. };
  56. struct af_alg_iv {
  57. __u32 ivlen;
  58. __u8 iv[0];
  59. };
  60. /* Socket options */
  61. #define ALG_SET_KEY 1
  62. #define ALG_SET_IV 2
  63. #define ALG_SET_OP 3
  64. /* Operations */
  65. #define ALG_OP_DECRYPT 0
  66. #define ALG_OP_ENCRYPT 1
  67. #else
  68. #include <linux/if_alg.h>
  69. #endif
  70. #ifndef SOL_ALG
  71. #define SOL_ALG 279
  72. #endif
  73. #ifndef ALG_SET_AEAD_ASSOCLEN
  74. #define ALG_SET_AEAD_ASSOCLEN 4
  75. #endif
  76. #ifndef ALG_SET_AEAD_AUTHSIZE
  77. #define ALG_SET_AEAD_AUTHSIZE 5
  78. #endif
/* A type is valid if it does not exceed the last l_cipher_type enumerator */
#define is_valid_type(type) ((type) <= L_CIPHER_RC2_CBC)

/* Bitmasks of available algorithms, lazily probed by init_supported() */
static uint32_t supported_ciphers;
static uint32_t supported_aead_ciphers;

struct l_cipher {
	int type;	/* enum l_cipher_type value */
	/*
	 * Non-NULL when this cipher is implemented in userspace (ARC4,
	 * RC2) instead of going through the kernel AF_ALG interface.
	 */
	const struct local_impl *local;
	union {
		int sk;			/* AF_ALG operation socket */
		void *local_data;	/* state owned by the local impl */
	};
};

struct l_aead_cipher {
	int type;	/* enum l_aead_cipher_type value */
	int sk;		/* AF_ALG operation socket */
};

/* Dispatch table for ciphers implemented locally rather than in-kernel */
struct local_impl {
	void *(*cipher_new)(enum l_cipher_type,
				const void *key, size_t key_length);
	void (*cipher_free)(void *data);
	bool (*set_iv)(void *data, const uint8_t *iv, size_t iv_length);
	ssize_t (*operate)(void *data, __u32 operation,
				const struct iovec *in, size_t in_cnt,
				const struct iovec *out, size_t out_cnt);
};
  103. static int create_alg(const char *alg_type, const char *alg_name,
  104. const void *key, size_t key_length, size_t tag_length)
  105. {
  106. struct sockaddr_alg salg;
  107. int sk;
  108. int ret;
  109. sk = socket(PF_ALG, SOCK_SEQPACKET | SOCK_CLOEXEC, 0);
  110. if (sk < 0)
  111. return -errno;
  112. memset(&salg, 0, sizeof(salg));
  113. salg.salg_family = AF_ALG;
  114. strcpy((char *) salg.salg_type, alg_type);
  115. strcpy((char *) salg.salg_name, alg_name);
  116. if (bind(sk, (struct sockaddr *) &salg, sizeof(salg)) < 0) {
  117. close(sk);
  118. return -1;
  119. }
  120. if (setsockopt(sk, SOL_ALG, ALG_SET_KEY, key, key_length) < 0) {
  121. close(sk);
  122. return -1;
  123. }
  124. if (tag_length && setsockopt(sk, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL,
  125. tag_length)) {
  126. close(sk);
  127. return -1;
  128. }
  129. ret = accept4(sk, NULL, 0, SOCK_CLOEXEC);
  130. close(sk);
  131. return ret;
  132. }
  133. static const char *cipher_type_to_name(enum l_cipher_type type)
  134. {
  135. switch (type) {
  136. case L_CIPHER_AES:
  137. return "ecb(aes)";
  138. case L_CIPHER_AES_CBC:
  139. return "cbc(aes)";
  140. case L_CIPHER_AES_CTR:
  141. return "ctr(aes)";
  142. case L_CIPHER_ARC4:
  143. return NULL;
  144. case L_CIPHER_DES:
  145. return "ecb(des)";
  146. case L_CIPHER_DES_CBC:
  147. return "cbc(des)";
  148. case L_CIPHER_DES3_EDE_CBC:
  149. return "cbc(des3_ede)";
  150. case L_CIPHER_RC2_CBC:
  151. return NULL;
  152. }
  153. return NULL;
  154. }
static const struct local_impl local_arc4;
static const struct local_impl local_rc2_cbc;

/* Indexed by enum l_cipher_type; NULL entries go through the kernel */
static const struct local_impl *local_impl_ciphers[] = {
	[L_CIPHER_ARC4] = &local_arc4,
	[L_CIPHER_RC2_CBC] = &local_rc2_cbc,
};

/* True when the type has a userspace implementation in this file */
#define HAVE_LOCAL_IMPLEMENTATION(type) \
	((type) < L_ARRAY_SIZE(local_impl_ciphers) && \
	 local_impl_ciphers[(type)])
  164. LIB_EXPORT struct l_cipher *l_cipher_new(enum l_cipher_type type,
  165. const void *key,
  166. size_t key_length)
  167. {
  168. struct l_cipher *cipher;
  169. const char *alg_name;
  170. if (unlikely(!key))
  171. return NULL;
  172. if (!is_valid_type(type))
  173. return NULL;
  174. cipher = l_new(struct l_cipher, 1);
  175. cipher->type = type;
  176. alg_name = cipher_type_to_name(type);
  177. if (HAVE_LOCAL_IMPLEMENTATION(type)) {
  178. cipher->local = local_impl_ciphers[type];
  179. cipher->local_data = cipher->local->cipher_new(type,
  180. key, key_length);
  181. if (!cipher->local_data)
  182. goto error_free;
  183. return cipher;
  184. }
  185. cipher->sk = create_alg("skcipher", alg_name, key, key_length, 0);
  186. if (cipher->sk < 0)
  187. goto error_free;
  188. return cipher;
  189. error_free:
  190. l_free(cipher);
  191. return NULL;
  192. }
  193. static const char *aead_cipher_type_to_name(enum l_aead_cipher_type type)
  194. {
  195. switch (type) {
  196. case L_AEAD_CIPHER_AES_CCM:
  197. return "ccm(aes)";
  198. case L_AEAD_CIPHER_AES_GCM:
  199. return "gcm(aes)";
  200. }
  201. return NULL;
  202. }
  203. LIB_EXPORT struct l_aead_cipher *l_aead_cipher_new(enum l_aead_cipher_type type,
  204. const void *key,
  205. size_t key_length,
  206. size_t tag_length)
  207. {
  208. struct l_aead_cipher *cipher;
  209. const char *alg_name;
  210. if (unlikely(!key))
  211. return NULL;
  212. if (type != L_AEAD_CIPHER_AES_CCM && type != L_AEAD_CIPHER_AES_GCM)
  213. return NULL;
  214. cipher = l_new(struct l_aead_cipher, 1);
  215. cipher->type = type;
  216. alg_name = aead_cipher_type_to_name(type);
  217. cipher->sk = create_alg("aead", alg_name, key, key_length, tag_length);
  218. if (cipher->sk >= 0)
  219. return cipher;
  220. l_free(cipher);
  221. return NULL;
  222. }
  223. LIB_EXPORT void l_cipher_free(struct l_cipher *cipher)
  224. {
  225. if (unlikely(!cipher))
  226. return;
  227. if (cipher->local)
  228. cipher->local->cipher_free(cipher->local_data);
  229. else
  230. close(cipher->sk);
  231. l_free(cipher);
  232. }
/* Release an AEAD cipher context and its AF_ALG socket; NULL is a no-op */
LIB_EXPORT void l_aead_cipher_free(struct l_aead_cipher *cipher)
{
	if (unlikely(!cipher))
		return;

	close(cipher->sk);
	l_free(cipher);
}
/*
 * Run one encrypt/decrypt operation on an AF_ALG operation socket.
 *
 * The request is described by ancillary data: ALG_SET_OP always, plus
 * ALG_SET_AEAD_ASSOCLEN when AAD is present and ALG_SET_IV when an IV is
 * given.  Input (AAD followed by plaintext/ciphertext) is sent with
 * sendmsg(), the result read back with recvmsg()/read().
 *
 * Returns the number of result bytes written to out (excluding the AAD
 * echo, see below) or a negative errno.
 */
static ssize_t operate_cipher(int sk, __u32 operation,
				const void *in, size_t in_len,
				const void *ad, size_t ad_len,
				const void *iv, size_t iv_len,
				void *out, size_t out_len)
{
	char *c_msg_buf;
	size_t c_msg_size;
	struct msghdr msg;
	struct cmsghdr *c_msg;
	struct iovec iov[2];
	ssize_t result;

	/* Size the control buffer for the headers actually needed */
	c_msg_size = CMSG_SPACE(sizeof(operation));
	c_msg_size += ad_len ? CMSG_SPACE(sizeof(uint32_t)) : 0;
	c_msg_size += iv_len ?
		CMSG_SPACE(sizeof(struct af_alg_iv) + iv_len) : 0;

	/* Bounded in practice: iv_len comes from fixed cipher IV sizes */
	c_msg_buf = alloca(c_msg_size);

	memset(c_msg_buf, 0, c_msg_size);
	memset(&msg, 0, sizeof(msg));

	msg.msg_iov = iov;
	msg.msg_control = c_msg_buf;
	msg.msg_controllen = c_msg_size;

	c_msg = CMSG_FIRSTHDR(&msg);
	c_msg->cmsg_level = SOL_ALG;
	c_msg->cmsg_type = ALG_SET_OP;
	c_msg->cmsg_len = CMSG_LEN(sizeof(operation));
	memcpy(CMSG_DATA(c_msg), &operation, sizeof(operation));

	if (ad_len) {
		uint32_t *ad_data;

		/* Tell the kernel how many leading bytes are AAD */
		c_msg = CMSG_NXTHDR(&msg, c_msg);
		c_msg->cmsg_level = SOL_ALG;
		c_msg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
		c_msg->cmsg_len = CMSG_LEN(sizeof(*ad_data));
		ad_data = (void *) CMSG_DATA(c_msg);
		*ad_data = ad_len;

		/* AAD must precede the payload in the data stream */
		iov[0].iov_base = (void *) ad;
		iov[0].iov_len = ad_len;
		iov[1].iov_base = (void *) in;
		iov[1].iov_len = in_len;
		msg.msg_iovlen = 2;
	} else {
		iov[0].iov_base = (void *) in;
		iov[0].iov_len = in_len;
		msg.msg_iovlen = 1;
	}

	if (iv_len) {
		struct af_alg_iv *algiv;

		c_msg = CMSG_NXTHDR(&msg, c_msg);
		c_msg->cmsg_level = SOL_ALG;
		c_msg->cmsg_type = ALG_SET_IV;
		c_msg->cmsg_len = CMSG_LEN(sizeof(*algiv) + iv_len);

		algiv = (void *)CMSG_DATA(c_msg);
		algiv->ivlen = iv_len;
		memcpy(algiv->iv, iv, iv_len);
	}

	result = sendmsg(sk, &msg, 0);
	if (result < 0)
		return -errno;

	if (ad_len) {
		/*
		 * When AEAD additional data is passed to sendmsg() for
		 * use in computing the tag, those bytes also appear at
		 * the beginning of the encrypt or decrypt results. Rather
		 * than force the caller to pad their result buffer with
		 * the correct number of bytes for the additional data,
		 * the necessary space is allocated here and then the
		 * duplicate AAD is discarded.
		 */
		iov[0].iov_base = l_malloc(ad_len);
		iov[0].iov_len = ad_len;
		iov[1].iov_base = (void *) out;
		iov[1].iov_len = out_len;
		msg.msg_iovlen = 2;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;

		result = recvmsg(sk, &msg, 0);

		/* Report only payload bytes, never the AAD echo */
		if (result >= (ssize_t) ad_len)
			result -= ad_len;
		else if (result > 0)
			result = 0;

		l_free(iov[0].iov_base);
	} else {
		result = read(sk, out, out_len);
	}

	if (result < 0)
		return -errno;

	return result;
}
  328. static ssize_t operate_cipherv(int sk, __u32 operation,
  329. const struct iovec *in, size_t in_cnt,
  330. const struct iovec *out, size_t out_cnt)
  331. {
  332. char *c_msg_buf;
  333. size_t c_msg_size;
  334. struct msghdr msg;
  335. struct cmsghdr *c_msg;
  336. ssize_t result;
  337. c_msg_size = CMSG_SPACE(sizeof(operation));
  338. c_msg_buf = alloca(c_msg_size);
  339. memset(c_msg_buf, 0, c_msg_size);
  340. memset(&msg, 0, sizeof(msg));
  341. msg.msg_iov = (struct iovec *) in;
  342. msg.msg_iovlen = in_cnt;
  343. msg.msg_control = c_msg_buf;
  344. msg.msg_controllen = c_msg_size;
  345. c_msg = CMSG_FIRSTHDR(&msg);
  346. c_msg->cmsg_level = SOL_ALG;
  347. c_msg->cmsg_type = ALG_SET_OP;
  348. c_msg->cmsg_len = CMSG_LEN(sizeof(operation));
  349. memcpy(CMSG_DATA(c_msg), &operation, sizeof(operation));
  350. result = sendmsg(sk, &msg, 0);
  351. if (result < 0)
  352. return -errno;
  353. result = readv(sk, out, out_cnt);
  354. if (result < 0)
  355. return -errno;
  356. return result;
  357. }
  358. LIB_EXPORT bool l_cipher_encrypt(struct l_cipher *cipher,
  359. const void *in, void *out, size_t len)
  360. {
  361. if (unlikely(!cipher))
  362. return false;
  363. if (unlikely(!in) || unlikely(!out))
  364. return false;
  365. if (cipher->local) {
  366. struct iovec in_iov = { (void *) in, len };
  367. struct iovec out_iov = { out, len };
  368. return cipher->local->operate(cipher->local_data,
  369. ALG_OP_ENCRYPT,
  370. &in_iov, 1, &out_iov, 1) >= 0;
  371. }
  372. return operate_cipher(cipher->sk, ALG_OP_ENCRYPT, in, len,
  373. NULL, 0, NULL, 0, out, len) >= 0;
  374. }
  375. LIB_EXPORT bool l_cipher_encryptv(struct l_cipher *cipher,
  376. const struct iovec *in, size_t in_cnt,
  377. const struct iovec *out, size_t out_cnt)
  378. {
  379. if (unlikely(!cipher))
  380. return false;
  381. if (unlikely(!in) || unlikely(!out))
  382. return false;
  383. if (cipher->local)
  384. return cipher->local->operate(cipher->local_data,
  385. ALG_OP_ENCRYPT,
  386. in, in_cnt, out, out_cnt) >= 0;
  387. return operate_cipherv(cipher->sk, ALG_OP_ENCRYPT, in, in_cnt,
  388. out, out_cnt) >= 0;
  389. }
  390. LIB_EXPORT bool l_cipher_decrypt(struct l_cipher *cipher,
  391. const void *in, void *out, size_t len)
  392. {
  393. if (unlikely(!cipher))
  394. return false;
  395. if (unlikely(!in) || unlikely(!out))
  396. return false;
  397. if (cipher->local) {
  398. struct iovec in_iov = { (void *) in, len };
  399. struct iovec out_iov = { out, len };
  400. return cipher->local->operate(cipher->local_data,
  401. ALG_OP_DECRYPT,
  402. &in_iov, 1, &out_iov, 1) >= 0;
  403. }
  404. return operate_cipher(cipher->sk, ALG_OP_DECRYPT, in, len,
  405. NULL, 0, NULL, 0, out, len) >= 0;
  406. }
  407. LIB_EXPORT bool l_cipher_decryptv(struct l_cipher *cipher,
  408. const struct iovec *in, size_t in_cnt,
  409. const struct iovec *out, size_t out_cnt)
  410. {
  411. if (unlikely(!cipher))
  412. return false;
  413. if (unlikely(!in) || unlikely(!out))
  414. return false;
  415. if (cipher->local)
  416. return cipher->local->operate(cipher->local_data,
  417. ALG_OP_DECRYPT,
  418. in, in_cnt, out, out_cnt) >= 0;
  419. return operate_cipherv(cipher->sk, ALG_OP_DECRYPT, in, in_cnt,
  420. out, out_cnt) >= 0;
  421. }
  422. LIB_EXPORT bool l_cipher_set_iv(struct l_cipher *cipher, const uint8_t *iv,
  423. size_t iv_length)
  424. {
  425. char c_msg_buf[CMSG_SPACE(4 + iv_length)];
  426. struct msghdr msg;
  427. struct cmsghdr *c_msg;
  428. uint32_t len = iv_length;
  429. if (unlikely(!cipher))
  430. return false;
  431. if (cipher->local) {
  432. if (!cipher->local->set_iv)
  433. return false;
  434. return cipher->local->set_iv(cipher->local_data, iv, iv_length);
  435. }
  436. memset(&c_msg_buf, 0, sizeof(c_msg_buf));
  437. memset(&msg, 0, sizeof(struct msghdr));
  438. msg.msg_control = c_msg_buf;
  439. msg.msg_controllen = sizeof(c_msg_buf);
  440. c_msg = CMSG_FIRSTHDR(&msg);
  441. c_msg->cmsg_level = SOL_ALG;
  442. c_msg->cmsg_type = ALG_SET_IV;
  443. c_msg->cmsg_len = CMSG_LEN(4 + iv_length);
  444. memcpy(CMSG_DATA(c_msg) + 0, &len, 4);
  445. memcpy(CMSG_DATA(c_msg) + 4, iv, iv_length);
  446. msg.msg_iov = NULL;
  447. msg.msg_iovlen = 0;
  448. if (sendmsg(cipher->sk, &msg, MSG_MORE) < 0)
  449. return false;
  450. return true;
  451. }
  452. #define CCM_IV_SIZE 16
  453. static size_t l_aead_cipher_get_ivlen(struct l_aead_cipher *cipher)
  454. {
  455. switch (cipher->type) {
  456. case L_AEAD_CIPHER_AES_CCM:
  457. return CCM_IV_SIZE;
  458. case L_AEAD_CIPHER_AES_GCM:
  459. return 12;
  460. }
  461. return 0;
  462. }
  463. /* RFC3610 Section 2.3 */
  464. static ssize_t build_ccm_iv(const void *nonce, uint8_t nonce_len,
  465. uint8_t (*iv)[CCM_IV_SIZE])
  466. {
  467. const size_t iv_overhead = 2;
  468. int lprime = 15 - nonce_len - 1;
  469. if (unlikely(nonce_len + iv_overhead > CCM_IV_SIZE || lprime > 7))
  470. return -EINVAL;
  471. (*iv)[0] = lprime;
  472. memcpy(*iv + 1, nonce, nonce_len);
  473. memset(*iv + 1 + nonce_len, 0, lprime + 1);
  474. return CCM_IV_SIZE;
  475. }
  476. LIB_EXPORT bool l_aead_cipher_encrypt(struct l_aead_cipher *cipher,
  477. const void *in, size_t in_len,
  478. const void *ad, size_t ad_len,
  479. const void *nonce, size_t nonce_len,
  480. void *out, size_t out_len)
  481. {
  482. uint8_t ccm_iv[CCM_IV_SIZE];
  483. const uint8_t *iv;
  484. ssize_t iv_len;
  485. if (unlikely(!cipher))
  486. return false;
  487. if (unlikely(!in && !ad) || unlikely(!out))
  488. return false;
  489. if (unlikely(!in && in_len) || unlikely(!ad && ad_len))
  490. return false;
  491. if (cipher->type == L_AEAD_CIPHER_AES_CCM) {
  492. iv_len = build_ccm_iv(nonce, nonce_len, &ccm_iv);
  493. if (unlikely(iv_len < 0))
  494. return false;
  495. iv = ccm_iv;
  496. } else {
  497. if (unlikely(nonce_len != l_aead_cipher_get_ivlen(cipher)))
  498. return false;
  499. iv = nonce;
  500. iv_len = nonce_len;
  501. }
  502. return operate_cipher(cipher->sk, ALG_OP_ENCRYPT, in, in_len,
  503. ad, ad_len, iv, iv_len, out, out_len) ==
  504. (ssize_t)out_len;
  505. }
  506. LIB_EXPORT bool l_aead_cipher_decrypt(struct l_aead_cipher *cipher,
  507. const void *in, size_t in_len,
  508. const void *ad, size_t ad_len,
  509. const void *nonce, size_t nonce_len,
  510. void *out, size_t out_len)
  511. {
  512. uint8_t ccm_iv[CCM_IV_SIZE];
  513. const uint8_t *iv;
  514. ssize_t iv_len;
  515. if (unlikely(!cipher))
  516. return false;
  517. if (unlikely(!in) || unlikely(!out))
  518. return false;
  519. if (cipher->type == L_AEAD_CIPHER_AES_CCM) {
  520. iv_len = build_ccm_iv(nonce, nonce_len, &ccm_iv);
  521. if (unlikely(iv_len < 0))
  522. return false;
  523. iv = ccm_iv;
  524. } else {
  525. if (unlikely(nonce_len != l_aead_cipher_get_ivlen(cipher)))
  526. return false;
  527. iv = nonce;
  528. iv_len = nonce_len;
  529. }
  530. return operate_cipher(cipher->sk, ALG_OP_DECRYPT, in, in_len,
  531. ad, ad_len, iv, iv_len, out, out_len) ==
  532. (ssize_t)out_len;
  533. }
  534. static void init_supported()
  535. {
  536. static bool initialized = false;
  537. struct sockaddr_alg salg;
  538. int sk;
  539. enum l_cipher_type c;
  540. enum l_aead_cipher_type a;
  541. if (likely(initialized))
  542. return;
  543. initialized = true;
  544. for (c = 0; c < L_ARRAY_SIZE(local_impl_ciphers); c++)
  545. if (HAVE_LOCAL_IMPLEMENTATION(c))
  546. supported_ciphers |= 1 << c;
  547. sk = socket(PF_ALG, SOCK_SEQPACKET | SOCK_CLOEXEC, 0);
  548. if (sk < 0)
  549. return;
  550. memset(&salg, 0, sizeof(salg));
  551. salg.salg_family = AF_ALG;
  552. strcpy((char *) salg.salg_type, "skcipher");
  553. for (c = L_CIPHER_AES; c <= L_CIPHER_DES3_EDE_CBC; c++) {
  554. const char *name = cipher_type_to_name(c);
  555. if (!name)
  556. continue;
  557. strcpy((char *) salg.salg_name, name);
  558. if (bind(sk, (struct sockaddr *) &salg, sizeof(salg)) < 0)
  559. continue;
  560. supported_ciphers |= 1 << c;
  561. }
  562. strcpy((char *) salg.salg_type, "aead");
  563. for (a = L_AEAD_CIPHER_AES_CCM; a <= L_AEAD_CIPHER_AES_GCM; a++) {
  564. strcpy((char *) salg.salg_name, aead_cipher_type_to_name(a));
  565. if (bind(sk, (struct sockaddr *) &salg, sizeof(salg)) < 0)
  566. continue;
  567. supported_aead_ciphers |= 1 << a;
  568. }
  569. close(sk);
  570. }
  571. LIB_EXPORT bool l_cipher_is_supported(enum l_cipher_type type)
  572. {
  573. if (!is_valid_type(type))
  574. return false;
  575. init_supported();
  576. return supported_ciphers & (1 << type);
  577. }
  578. LIB_EXPORT bool l_aead_cipher_is_supported(enum l_aead_cipher_type type)
  579. {
  580. if (type != L_AEAD_CIPHER_AES_CCM && type != L_AEAD_CIPHER_AES_GCM)
  581. return false;
  582. init_supported();
  583. return supported_aead_ciphers & (1 << type);
  584. }
  585. /* ARC4 implementation copyright (c) 2001 Niels Möller */
  586. #define SWAP(a, b) do { uint8_t _t = a; a = b; b = _t; } while (0)
  587. static void arc4_set_key(uint8_t *S, const uint8_t *key, size_t key_length)
  588. {
  589. unsigned int i;
  590. uint8_t j;
  591. for (i = 0; i < 256; i++)
  592. S[i] = i;
  593. for (i = j = 0; i < 256; i++) {
  594. j += S[i] + key[i % key_length];
  595. SWAP(S[i], S[j]);
  596. }
  597. }
/*
 * Two independent RC4 states: ctx[1] is used for encryption, ctx[0] for
 * decryption, so each direction keeps its own keystream position.
 */
struct arc4_state {
	struct arc4_state_ctx {
		uint8_t S[256];	/* keystream permutation */
		uint8_t i;	/* PRGA index i */
		uint8_t j;	/* PRGA index j */
	} ctx[2];
};
  605. static void *local_arc4_new(enum l_cipher_type type,
  606. const void *key, size_t key_length)
  607. {
  608. struct arc4_state *s;
  609. if (unlikely(key_length == 0 || key_length > 256))
  610. return NULL;
  611. s = l_new(struct arc4_state, 1);
  612. arc4_set_key(s->ctx[0].S, key, key_length);
  613. s->ctx[1] = s->ctx[0];
  614. return s;
  615. }
/* Wipe key-derived state before freeing; explicit_bzero cannot be
 * optimized away like a plain memset before free could be. */
static void local_arc4_free(void *data)
{
	explicit_bzero(data, sizeof(struct arc4_state));
	l_free(data);
}
  621. static ssize_t local_arc4_operate(void *data, __u32 operation,
  622. const struct iovec *in, size_t in_cnt,
  623. const struct iovec *out, size_t out_cnt)
  624. {
  625. struct arc4_state *s = data;
  626. struct iovec cur_in;
  627. struct iovec cur_out;
  628. struct arc4_state_ctx *ctx =
  629. &s->ctx[operation == ALG_OP_ENCRYPT ? 1 : 0];
  630. if (!in_cnt || !out_cnt)
  631. return 0;
  632. cur_in = *in;
  633. cur_out = *out;
  634. while (1) {
  635. while (!cur_in.iov_len) {
  636. cur_in = *in++;
  637. if (!--in_cnt)
  638. return 0;
  639. }
  640. while (!cur_out.iov_len) {
  641. cur_out = *out++;
  642. if (!--out_cnt)
  643. return 0;
  644. }
  645. ctx->j += ctx->S[++ctx->i];
  646. SWAP(ctx->S[ctx->i], ctx->S[ctx->j]);
  647. *(uint8_t *) cur_out.iov_base++ =
  648. *(uint8_t *) cur_in.iov_base++ ^
  649. ctx->S[(ctx->S[ctx->i] + ctx->S[ctx->j]) & 0xff];
  650. cur_in.iov_len--;
  651. cur_out.iov_len--;
  652. }
  653. }
  654. static const struct local_impl local_arc4 = {
  655. local_arc4_new,
  656. local_arc4_free,
  657. NULL,
  658. local_arc4_operate,
  659. };
/*
 * RC2 state: the expanded 64-word key (also byte-addressable during key
 * expansion) and one CBC chaining block per direction — ctx[1] for
 * encryption, ctx[0] for decryption.
 */
struct rc2_state {
	union {
		uint16_t xkey[64];	/* expanded key, 16-bit words */
		uint8_t xkey8[128];	/* byte view used by key schedule */
	};
	struct rc2_state_ctx {
		union {
			uint16_t x[4];	/* block as four 16-bit words */
			uint64_t x64;	/* block as one 64-bit word */
		};
	} ctx[2];
};
/* Simplified from the 1996 public-domain implementation */

/*
 * RC2 key expansion (RFC 2268 Section 2): expand the supplied key to
 * 128 bytes via the pitable permutation, then reduce the effective key
 * strength to "bits" bits by rewriting the leading bytes.
 *
 * key_len must be 1..128; bits is the effective key size in bits.
 */
static void rc2_keyschedule(struct rc2_state *s,
				const uint8_t *key, size_t key_len,
				size_t bits)
{
	/* The RC2 "pitable" — a fixed permutation of 0..255 */
	static const uint8_t permute[256] = {
		217,120,249,196, 25,221,181,237, 40,233,253,121, 74,160,216,157,
		198,126, 55,131, 43,118, 83,142, 98, 76,100,136, 68,139,251,162,
		23,154, 89,245,135,179, 79, 19, 97, 69,109,141, 9,129,125, 50,
		189,143, 64,235,134,183,123, 11,240,149, 33, 34, 92,107, 78,130,
		84,214,101,147,206, 96,178, 28,115, 86,192, 20,167,140,241,220,
		18,117,202, 31, 59,190,228,209, 66, 61,212, 48,163, 60,182, 38,
		111,191, 14,218, 70,105, 7, 87, 39,242, 29,155,188,148, 67, 3,
		248, 17,199,246,144,239, 62,231, 6,195,213, 47,200,102, 30,215,
		8,232,234,222,128, 82,238,247,132,170,114,172, 53, 77,106, 42,
		150, 26,210,113, 90, 21, 73,116, 75,159,208, 94, 4, 24,164,236,
		194,224, 65,110, 15, 81,203,204, 36,145,175, 80,161,244,112, 57,
		153,124, 58,133, 35,184,180,122,252, 2, 54, 91, 37, 85,151, 49,
		45, 93,250,152,227,138,146,174, 5,223, 41, 16,103,108,186,201,
		211, 0,230,207,225,158,168, 44, 99, 22, 1, 63, 88,226,137,169,
		13, 56, 52, 27,171, 51,255,176,187, 72, 12, 95,185,177,205, 46,
		197,243,219, 71,229,165,156,119, 10,166, 32,104,254,127,193,173
	};
	uint8_t x;
	unsigned int i;

	memcpy(&s->xkey8, key, key_len);

	/* Step 1: expand input key to 128 bytes */
	x = s->xkey8[key_len - 1];

	for (i = 0; key_len < 128; key_len++, i++)
		s->xkey8[key_len] = x = permute[(x + s->xkey8[i]) & 255];

	/* Step 2: reduce effective key size to "bits" */
	key_len = (bits + 7) >> 3;
	i = 128 - key_len;
	/* Mask the first effective-key byte to its significant bits */
	s->xkey8[i] = x = permute[s->xkey8[i] & (255 >> (7 & -bits))];

	while (i--)
		s->xkey8[i] = x = permute[x ^ s->xkey8[i + key_len]];

	/* Step 3: copy to xkey in little-endian order */
	for (i = 0; i < 64; i++)
		s->xkey[i] = L_CPU_TO_LE16(s->xkey[i]);
}
/*
 * Encrypt or decrypt one 64-bit RC2 block (RFC 2268 Section 2.1): 16
 * "mixing" rounds over the four 16-bit words, with a "mashing" round
 * (key-table lookups) after rounds 5 and 11.  Decryption runs the same
 * schedule in reverse with subtraction and right rotations.
 */
static uint64_t rc2_operate(struct rc2_state *s, uint64_t in, __u32 operation)
{
	int i;
	union {
		uint16_t x16[4];
		uint64_t x64;
	} x;

	x.x64 = in;

	if (operation == ALG_OP_ENCRYPT) {
		const uint16_t *xkey = s->xkey;

		for (i = 0; i < 16; i++) {
			/* Mix: add, then rotate left by 1/2/3/5 */
			x.x16[0] += (x.x16[1] & ~x.x16[3]) +
				(x.x16[2] & x.x16[3]) + *xkey++;
			x.x16[0] = (x.x16[0] << 1) | (x.x16[0] >> 15);
			x.x16[1] += (x.x16[2] & ~x.x16[0]) +
				(x.x16[3] & x.x16[0]) + *xkey++;
			x.x16[1] = (x.x16[1] << 2) | (x.x16[1] >> 14);
			x.x16[2] += (x.x16[3] & ~x.x16[1]) +
				(x.x16[0] & x.x16[1]) + *xkey++;
			x.x16[2] = (x.x16[2] << 3) | (x.x16[2] >> 13);
			x.x16[3] += (x.x16[0] & ~x.x16[2]) +
				(x.x16[1] & x.x16[2]) + *xkey++;
			x.x16[3] = (x.x16[3] << 5) | (x.x16[3] >> 11);

			/* Mash after rounds 5 and 11 */
			if (i == 4 || i == 10) {
				x.x16[0] += s->xkey[x.x16[3] & 63];
				x.x16[1] += s->xkey[x.x16[0] & 63];
				x.x16[2] += s->xkey[x.x16[1] & 63];
				x.x16[3] += s->xkey[x.x16[2] & 63];
			}
		}
	} else {
		const uint16_t *xkey = s->xkey + 63;

		for (i = 0; i < 16; i++) {
			/* Inverse mix: rotate right, then subtract */
			x.x16[3] = (x.x16[3] << 11) | (x.x16[3] >> 5);
			x.x16[3] -= (x.x16[0] & ~x.x16[2]) +
				(x.x16[1] & x.x16[2]) + *xkey--;
			x.x16[2] = (x.x16[2] << 13) | (x.x16[2] >> 3);
			x.x16[2] -= (x.x16[3] & ~x.x16[1]) +
				(x.x16[0] & x.x16[1]) + *xkey--;
			x.x16[1] = (x.x16[1] << 14) | (x.x16[1] >> 2);
			x.x16[1] -= (x.x16[2] & ~x.x16[0]) +
				(x.x16[3] & x.x16[0]) + *xkey--;
			x.x16[0] = (x.x16[0] << 15) | (x.x16[0] >> 1);
			x.x16[0] -= (x.x16[1] & ~x.x16[3]) +
				(x.x16[2] & x.x16[3]) + *xkey--;

			/* Inverse mash after rounds 5 and 11 (reversed) */
			if (i == 4 || i == 10) {
				x.x16[3] -= s->xkey[x.x16[2] & 63];
				x.x16[2] -= s->xkey[x.x16[1] & 63];
				x.x16[1] -= s->xkey[x.x16[0] & 63];
				x.x16[0] -= s->xkey[x.x16[3] & 63];
			}
		}
	}

	return x.x64;
}
  767. static void *local_rc2_cbc_new(enum l_cipher_type type,
  768. const void *key, size_t key_length)
  769. {
  770. struct rc2_state *s;
  771. if (unlikely(key_length == 0 || key_length > 128))
  772. return NULL;
  773. /*
  774. * The key length and the effective "strength" bits are separate
  775. * parameters but they match in our current use cases.
  776. */
  777. s = l_new(struct rc2_state, 1);
  778. rc2_keyschedule(s, key, key_length, key_length * 8);
  779. return s;
  780. }
/* Wipe expanded key material before freeing; explicit_bzero cannot be
 * optimized away like a plain memset before free could be. */
static void local_rc2_cbc_free(void *data)
{
	explicit_bzero(data, sizeof(struct rc2_state));
	l_free(data);
}
  786. static bool local_rc2_cbc_set_iv(void *data,
  787. const uint8_t *iv, size_t iv_length)
  788. {
  789. struct rc2_state *s = data;
  790. if (unlikely(iv_length != 8))
  791. return false;
  792. s->ctx[0].x[0] = l_get_le16(iv + 0);
  793. s->ctx[0].x[1] = l_get_le16(iv + 2);
  794. s->ctx[0].x[2] = l_get_le16(iv + 4);
  795. s->ctx[0].x[3] = l_get_le16(iv + 6);
  796. s->ctx[1].x64 = s->ctx[0].x64;
  797. return true;
  798. }
/*
 * RC2-CBC over iovec lists, 8 bytes at a time.  Input must be a
 * multiple of 8 bytes overall; running out of input mid-block returns
 * -1, while clean exhaustion at a block boundary returns 0.  Running
 * out of output space returns 0 early.
 *
 * NOTE(review): the CUR_IN16/CUR_OUT16 fast paths read/write uint16_t
 * through a cast of iov_base — this presumes unaligned 16-bit access is
 * acceptable on the target (true on the platforms ell targets); the
 * byte-at-a-time path handles segment-straddling blocks.
 */
static ssize_t local_rc2_cbc_operate(void *data, __u32 operation,
					const struct iovec *in, size_t in_cnt,
					const struct iovec *out, size_t out_cnt)
{
	struct rc2_state *s = data;
	struct iovec cur_in = {};
	struct iovec cur_out = {};
	/* Separate CBC chaining block per direction */
	struct rc2_state_ctx *ctx =
		&s->ctx[operation == ALG_OP_ENCRYPT ? 1 : 0];

/*
 * Account for "bytes" consumed from the current input segment, then
 * advance to the next non-empty segment.  At end of input: break out if
 * eof_ok (block boundary), else fail with -1 (truncated block).
 */
#define CONSUME_IN(bytes, eof_ok)	\
	cur_in.iov_len -= (bytes);	\
	while (!cur_in.iov_len) {	\
		if (!in_cnt) {		\
			if (eof_ok)	\
				break;	\
			else		\
				return -1;	\
		}			\
					\
		cur_in = *in++;		\
		in_cnt--;		\
	}

/* Same for output; exhausting the output simply ends the operation */
#define CONSUME_OUT(bytes)	\
	cur_out.iov_len -= (bytes);	\
	while (!cur_out.iov_len) {	\
		if (!out_cnt)	\
			return 0;	\
			\
		cur_out = *out++;	\
		out_cnt--;	\
	}

	/* Prime cur_in/cur_out with the first non-empty segments */
	CONSUME_IN(0, true)
	CONSUME_OUT(0)

	while (cur_in.iov_len) {
		union {
			uint16_t x16[4];
			uint64_t x64;
		} inblk;

		if (cur_in.iov_len >= 8) {
			/* Whole block available in this segment */
#define CUR_IN16 (*(uint16_t **) &cur_in.iov_base)
			inblk.x16[0] = l_get_le16(CUR_IN16++);
			inblk.x16[1] = l_get_le16(CUR_IN16++);
			inblk.x16[2] = l_get_le16(CUR_IN16++);
			inblk.x16[3] = l_get_le16(CUR_IN16++);
			CONSUME_IN(8, true)
		} else {
			/* Block straddles segments: gather byte by byte */
			inblk.x16[0] = *(uint8_t *) cur_in.iov_base++;
			CONSUME_IN(1, false)
			inblk.x16[0] |= (*(uint8_t *) cur_in.iov_base++) << 8;
			CONSUME_IN(1, false)
			inblk.x16[1] = *(uint8_t *) cur_in.iov_base++;
			CONSUME_IN(1, false)
			inblk.x16[1] |= (*(uint8_t *) cur_in.iov_base++) << 8;
			CONSUME_IN(1, false)
			inblk.x16[2] = *(uint8_t *) cur_in.iov_base++;
			CONSUME_IN(1, false)
			inblk.x16[2] |= (*(uint8_t *) cur_in.iov_base++) << 8;
			CONSUME_IN(1, false)
			inblk.x16[3] = *(uint8_t *) cur_in.iov_base++;
			CONSUME_IN(1, false)
			inblk.x16[3] |= (*(uint8_t *) cur_in.iov_base++) << 8;
			CONSUME_IN(1, true)
		}

		/* CBC: XOR before encrypting, after decrypting */
		if (operation == ALG_OP_ENCRYPT)
			ctx->x64 = rc2_operate(s, inblk.x64 ^ ctx->x64,
						operation);
		else
			ctx->x64 ^= rc2_operate(s, inblk.x64, operation);

		if (cur_out.iov_len >= 8) {
#define CUR_OUT16 (*(uint16_t **) &cur_out.iov_base)
			l_put_le16(ctx->x[0], CUR_OUT16++);
			l_put_le16(ctx->x[1], CUR_OUT16++);
			l_put_le16(ctx->x[2], CUR_OUT16++);
			l_put_le16(ctx->x[3], CUR_OUT16++);
			CONSUME_OUT(8)
		} else {
			/* Output block straddles segments */
			*(uint8_t *) cur_out.iov_base++ = ctx->x[0];
			CONSUME_OUT(1)
			*(uint8_t *) cur_out.iov_base++ = ctx->x[0] >> 8;
			CONSUME_OUT(1)
			*(uint8_t *) cur_out.iov_base++ = ctx->x[1];
			CONSUME_OUT(1)
			*(uint8_t *) cur_out.iov_base++ = ctx->x[1] >> 8;
			CONSUME_OUT(1)
			*(uint8_t *) cur_out.iov_base++ = ctx->x[2];
			CONSUME_OUT(1)
			*(uint8_t *) cur_out.iov_base++ = ctx->x[2] >> 8;
			CONSUME_OUT(1)
			*(uint8_t *) cur_out.iov_base++ = ctx->x[3];
			CONSUME_OUT(1)
			*(uint8_t *) cur_out.iov_base++ = ctx->x[3] >> 8;
			CONSUME_OUT(1)
		}

		/* Save ciphertext as IV for next CBC block */
		if (operation == ALG_OP_DECRYPT)
			ctx->x64 = inblk.x64;

		inblk.x64 = 0;
	}

	return 0;
}
  899. static const struct local_impl local_rc2_cbc = {
  900. local_rc2_cbc_new,
  901. local_rc2_cbc_free,
  902. local_rc2_cbc_set_iv,
  903. local_rc2_cbc_operate,
  904. };