openssl_aes.c

/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#include <aws/cal/private/symmetric_cipher_priv.h>

#include <openssl/evp.h>
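
/* State shared by every OpenSSL-backed AES cipher in this file: separate EVP contexts for the
 * encrypt and decrypt directions, plus a scratch buffer used by the key-wrap mode. */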
struct openssl_aes_cipher {
    struct aws_symmetric_cipher cipher_base;
    EVP_CIPHER_CTX *encryptor_ctx;
    EVP_CIPHER_CTX *decryptor_ctx;
    struct aws_byte_buf working_buffer;
};
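
/* Shared EVP_EncryptUpdate path used by the CBC, CTR, and GCM vtables. Output can be up to
 * input.len + block_size bytes because of buffered partial blocks. */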
static int s_encrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) {
    size_t required_buffer_space = input.len + cipher->block_size;

    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
    }

    size_t available_write_space = out->capacity - out->len;
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    int len_written = (int)(available_write_space);
    if (!EVP_EncryptUpdate(
            openssl_cipher->encryptor_ctx, out->buffer + out->len, &len_written, input.ptr, (int)input.len)) {
        cipher->good = false;
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    out->len += len_written;
    return AWS_OP_SUCCESS;
}
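
/* Flushes any remaining buffered block (plus padding, in modes that use it) via EVP_EncryptFinal_ex. */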
static int s_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    size_t required_buffer_space = cipher->block_size;

    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
    }

    int len_written = (int)(out->capacity - out->len);
    if (!EVP_EncryptFinal_ex(openssl_cipher->encryptor_ctx, out->buffer + out->len, &len_written)) {
        cipher->good = false;
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    out->len += len_written;
    return AWS_OP_SUCCESS;
}
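
/* Shared EVP_DecryptUpdate path used by the CBC, CTR, and GCM vtables. */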
static int s_decrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    size_t required_buffer_space = input.len + cipher->block_size;

    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
    }

    size_t available_write_space = out->capacity - out->len;

    int len_written = (int)available_write_space;
    if (!EVP_DecryptUpdate(
            openssl_cipher->decryptor_ctx, out->buffer + out->len, &len_written, input.ptr, (int)input.len)) {
        cipher->good = false;
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    out->len += len_written;
    return AWS_OP_SUCCESS;
}
static int s_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    size_t required_buffer_space = cipher->block_size;

    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
    }

    /* parenthesize before casting so the size_t subtraction happens first, matching s_finalize_encryption */
    int len_written = (int)(out->capacity - out->len);
    if (!EVP_DecryptFinal_ex(openssl_cipher->decryptor_ctx, out->buffer + out->len, &len_written)) {
        cipher->good = false;
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    out->len += len_written;
    return AWS_OP_SUCCESS;
}
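
/* Frees both EVP contexts and securely zeroes all key material owned by the cipher. */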
static void s_destroy(struct aws_symmetric_cipher *cipher) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    if (openssl_cipher->encryptor_ctx) {
        EVP_CIPHER_CTX_free(openssl_cipher->encryptor_ctx);
    }

    if (openssl_cipher->decryptor_ctx) {
        EVP_CIPHER_CTX_free(openssl_cipher->decryptor_ctx);
    }

    aws_byte_buf_clean_up_secure(&cipher->key);
    aws_byte_buf_clean_up_secure(&cipher->iv);

    if (cipher->tag.buffer) {
        aws_byte_buf_clean_up_secure(&cipher->tag);
    }

    if (cipher->aad.buffer) {
        aws_byte_buf_clean_up_secure(&cipher->aad);
    }

    aws_byte_buf_clean_up_secure(&openssl_cipher->working_buffer);

    aws_mem_release(cipher->allocator, openssl_cipher);
}
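
/* Wipes per-message state so the same cipher instance can be reused; each mode's reset function
 * re-runs its init routine afterwards. */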
static int s_clear_reusable_state(struct aws_symmetric_cipher *cipher) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
    EVP_CIPHER_CTX_cleanup(openssl_cipher->encryptor_ctx);
    EVP_CIPHER_CTX_cleanup(openssl_cipher->decryptor_ctx);
    aws_byte_buf_secure_zero(&openssl_cipher->working_buffer);
    cipher->good = true;
    return AWS_OP_SUCCESS;
}
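
/* (Re)initializes both contexts for AES-256-CBC with the stored key and IV. */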
static int s_init_cbc_cipher_materials(struct aws_symmetric_cipher *cipher) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    if (!EVP_EncryptInit_ex(
            openssl_cipher->encryptor_ctx,
            EVP_aes_256_cbc(),
            NULL,
            openssl_cipher->cipher_base.key.buffer,
            openssl_cipher->cipher_base.iv.buffer) ||
        !EVP_DecryptInit_ex(
            openssl_cipher->decryptor_ctx,
            EVP_aes_256_cbc(),
            NULL,
            openssl_cipher->cipher_base.key.buffer,
            openssl_cipher->cipher_base.iv.buffer)) {
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    return AWS_OP_SUCCESS;
}
static int s_reset_cbc_cipher_materials(struct aws_symmetric_cipher *cipher) {
    int ret_val = s_clear_reusable_state(cipher);

    if (ret_val == AWS_OP_SUCCESS) {
        return s_init_cbc_cipher_materials(cipher);
    }

    return ret_val;
}
static struct aws_symmetric_cipher_vtable s_cbc_vtable = {
    .alg_name = "AES-CBC 256",
    .provider = "OpenSSL Compatible LibCrypto",
    .destroy = s_destroy,
    .reset = s_reset_cbc_cipher_materials,
    .decrypt = s_decrypt,
    .encrypt = s_encrypt,
    .finalize_decryption = s_finalize_decryption,
    .finalize_encryption = s_finalize_encryption,
};
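
/* Creates an AES-256-CBC cipher. If key or iv is NULL, a random 32-byte key / 16-byte IV is generated. */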
struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl(
    struct aws_allocator *allocator,
    const struct aws_byte_cursor *key,
    const struct aws_byte_cursor *iv) {

    struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher));
    cipher->cipher_base.allocator = allocator;
    cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
    cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
    cipher->cipher_base.vtable = &s_cbc_vtable;
    cipher->cipher_base.impl = cipher;

    if (key) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key);
    } else {
        aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN);
        aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key);
    }

    if (iv) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv);
    } else {
        aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
        aws_symmetric_cipher_generate_initialization_vector(
            AWS_AES_256_CIPHER_BLOCK_SIZE, false, &cipher->cipher_base.iv);
    }

    /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */
    cipher->encryptor_ctx = EVP_CIPHER_CTX_new();
    AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Cipher initialization failed!");

    /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */
    cipher->decryptor_ctx = EVP_CIPHER_CTX_new();
    AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Cipher initialization failed!");

    if (s_init_cbc_cipher_materials(&cipher->cipher_base) != AWS_OP_SUCCESS) {
        goto error;
    }

    cipher->cipher_base.good = true;
    return &cipher->cipher_base;

error:
    s_destroy(&cipher->cipher_base);
    return NULL;
}
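
/* (Re)initializes both contexts for AES-256-CTR; padding is disabled since CTR is a stream mode. */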
static int s_init_ctr_cipher_materials(struct aws_symmetric_cipher *cipher) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    if (!(EVP_EncryptInit_ex(
              openssl_cipher->encryptor_ctx,
              EVP_aes_256_ctr(),
              NULL,
              openssl_cipher->cipher_base.key.buffer,
              openssl_cipher->cipher_base.iv.buffer) &&
          EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) ||
        !(EVP_DecryptInit_ex(
              openssl_cipher->decryptor_ctx,
              EVP_aes_256_ctr(),
              NULL,
              openssl_cipher->cipher_base.key.buffer,
              openssl_cipher->cipher_base.iv.buffer) &&
          EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) {
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    return AWS_OP_SUCCESS;
}
static int s_reset_ctr_cipher_materials(struct aws_symmetric_cipher *cipher) {
    int ret_val = s_clear_reusable_state(cipher);

    if (ret_val == AWS_OP_SUCCESS) {
        return s_init_ctr_cipher_materials(cipher);
    }

    return ret_val;
}

static struct aws_symmetric_cipher_vtable s_ctr_vtable = {
    .alg_name = "AES-CTR 256",
    .provider = "OpenSSL Compatible LibCrypto",
    .destroy = s_destroy,
    .reset = s_reset_ctr_cipher_materials,
    .decrypt = s_decrypt,
    .encrypt = s_encrypt,
    .finalize_decryption = s_finalize_decryption,
    .finalize_encryption = s_finalize_encryption,
};
struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl(
    struct aws_allocator *allocator,
    const struct aws_byte_cursor *key,
    const struct aws_byte_cursor *iv) {

    struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher));
    cipher->cipher_base.allocator = allocator;
    cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
    cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
    cipher->cipher_base.vtable = &s_ctr_vtable;
    cipher->cipher_base.impl = cipher;

    if (key) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key);
    } else {
        aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN);
        aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key);
    }

    if (iv) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv);
    } else {
        aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
        aws_symmetric_cipher_generate_initialization_vector(
            AWS_AES_256_CIPHER_BLOCK_SIZE, true, &cipher->cipher_base.iv);
    }

    /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */
    cipher->encryptor_ctx = EVP_CIPHER_CTX_new();
    AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Cipher initialization failed!");

    /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */
    cipher->decryptor_ctx = EVP_CIPHER_CTX_new();
    AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Cipher initialization failed!");

    if (s_init_ctr_cipher_materials(&cipher->cipher_base) != AWS_OP_SUCCESS) {
        goto error;
    }

    cipher->cipher_base.good = true;
    return &cipher->cipher_base;

error:
    s_destroy(&cipher->cipher_base);
    return NULL;
}
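
/* GCM finalization: after the normal EVP_EncryptFinal_ex, fetch the authentication tag into
 * cipher->tag unless a tag was already supplied. */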
static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    int ret_val = s_finalize_encryption(cipher, out);

    if (ret_val == AWS_OP_SUCCESS) {
        if (!cipher->tag.len) {
            if (!EVP_CIPHER_CTX_ctrl(
                    openssl_cipher->encryptor_ctx,
                    EVP_CTRL_GCM_GET_TAG,
                    (int)cipher->tag.capacity,
                    cipher->tag.buffer)) {
                cipher->good = false;
                return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
            }
            cipher->tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE;
        }
    }

    return ret_val;
}
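
/* (Re)initializes both contexts for AES-256-GCM: select the cipher, set key/IV, disable padding,
 * feed the AAD (if any) through a NULL-output update, and hand the expected tag to the decryptor
 * when one was provided. */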
static int s_init_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    if (!(EVP_EncryptInit_ex(openssl_cipher->encryptor_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL) &&
          EVP_EncryptInit_ex(
              openssl_cipher->encryptor_ctx,
              NULL,
              NULL,
              openssl_cipher->cipher_base.key.buffer,
              openssl_cipher->cipher_base.iv.buffer) &&
          EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) ||
        !(EVP_DecryptInit_ex(openssl_cipher->decryptor_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL) &&
          EVP_DecryptInit_ex(
              openssl_cipher->decryptor_ctx,
              NULL,
              NULL,
              openssl_cipher->cipher_base.key.buffer,
              openssl_cipher->cipher_base.iv.buffer) &&
          EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) {
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    if (openssl_cipher->cipher_base.aad.len) {
        int outLen = 0;
        if (!EVP_EncryptUpdate(
                openssl_cipher->encryptor_ctx,
                NULL,
                &outLen,
                openssl_cipher->cipher_base.aad.buffer,
                (int)openssl_cipher->cipher_base.aad.len) ||
            !EVP_DecryptUpdate(
                openssl_cipher->decryptor_ctx,
                NULL,
                &outLen,
                openssl_cipher->cipher_base.aad.buffer,
                (int)openssl_cipher->cipher_base.aad.len)) {
            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        }
    }

    if (openssl_cipher->cipher_base.tag.len) {
        if (!EVP_CIPHER_CTX_ctrl(
                openssl_cipher->decryptor_ctx,
                EVP_CTRL_GCM_SET_TAG,
                (int)openssl_cipher->cipher_base.tag.len,
                openssl_cipher->cipher_base.tag.buffer)) {
            return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
        }
    }

    return AWS_OP_SUCCESS;
}
static int s_reset_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) {
    int ret_val = s_clear_reusable_state(cipher);

    if (ret_val == AWS_OP_SUCCESS) {
        return s_init_gcm_cipher_materials(cipher);
    }

    return ret_val;
}

static struct aws_symmetric_cipher_vtable s_gcm_vtable = {
    .alg_name = "AES-GCM 256",
    .provider = "OpenSSL Compatible LibCrypto",
    .destroy = s_destroy,
    .reset = s_reset_gcm_cipher_materials,
    .decrypt = s_decrypt,
    .encrypt = s_encrypt,
    .finalize_decryption = s_finalize_decryption,
    .finalize_encryption = s_finalize_gcm_encryption,
};
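
/* Creates an AES-256-GCM cipher. When no IV is supplied, a 12-byte IV (block size minus 4) is
 * generated, the conventional nonce length for GCM. */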
struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl(
    struct aws_allocator *allocator,
    const struct aws_byte_cursor *key,
    const struct aws_byte_cursor *iv,
    const struct aws_byte_cursor *aad,
    const struct aws_byte_cursor *decryption_tag) {

    struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher));
    cipher->cipher_base.allocator = allocator;
    cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE;
    cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
    cipher->cipher_base.vtable = &s_gcm_vtable;
    cipher->cipher_base.impl = cipher;

    /* Copy key into the cipher context. */
    if (key) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key);
    } else {
        aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN);
        aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key);
    }

    /* Copy initialization vector into the cipher context. */
    if (iv) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv);
    } else {
        aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE - 4);
        aws_symmetric_cipher_generate_initialization_vector(
            AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, &cipher->cipher_base.iv);
    }

    /* Initialize the cipher contexts. */
    cipher->encryptor_ctx = EVP_CIPHER_CTX_new();
    AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Encryptor cipher initialization failed!");

    cipher->decryptor_ctx = EVP_CIPHER_CTX_new();
    AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Decryptor cipher initialization failed!");

    /* Set AAD if provided. */
    if (aad) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.aad, allocator, *aad);
    }

    /* Set tag for the decryptor to use. */
    if (decryption_tag) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.tag, allocator, *decryption_tag);
    } else {
        /* we'll need this later when we grab the tag during encryption time. */
        aws_byte_buf_init(&cipher->cipher_base.tag, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE);
    }

    /* Initialize the cipher contexts with the specified key and IV. */
    if (s_init_gcm_cipher_materials(&cipher->cipher_base)) {
        goto error;
    }

    cipher->cipher_base.good = true;
    return &cipher->cipher_base;

error:
    s_destroy(&cipher->cipher_base);
    return NULL;
}
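
/* Key wrap has no streaming form here: encrypt and decrypt just accumulate input into the working
 * buffer, and all of the real work happens in the finalize functions below. */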
static int s_key_wrap_encrypt_decrypt(
    struct aws_symmetric_cipher *cipher,
    struct aws_byte_cursor input,
    struct aws_byte_buf *out) {
    (void)out;
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;
    return aws_byte_buf_append_dynamic(&openssl_cipher->working_buffer, &input);
}

static const size_t MIN_CEK_LENGTH_BYTES = 128 / 8;
static const unsigned char INTEGRITY_VALUE = 0xA6;
#define KEYWRAP_BLOCK_SIZE 8u
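
/* RFC 3394 AES key wrap over the accumulated working buffer, written to out. */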
static int s_key_wrap_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    if (openssl_cipher->working_buffer.len < MIN_CEK_LENGTH_BYTES) {
        cipher->good = false;
        return aws_raise_error(AWS_ERROR_INVALID_STATE);
    }

    /* The following is an in-place implementation of the RFC 3394 wrap operation, using the
       alternative indexing-based variant: we work in a single buffer rather than copying at the end.
       The one-letter variable names are meant to directly mirror the variables in the RFC. */
    size_t required_buffer_space = openssl_cipher->working_buffer.len + cipher->block_size;
    size_t starting_len_offset = out->len;

    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
    }

    /* put the integrity check register in the first 8 bytes of the final buffer. */
    aws_byte_buf_write_u8_n(out, INTEGRITY_VALUE, KEYWRAP_BLOCK_SIZE);
    uint8_t *a = out->buffer + starting_len_offset;

    struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&openssl_cipher->working_buffer);
    aws_byte_buf_write_from_whole_cursor(out, working_buf_cur);

    /* put the register buffer after the integrity check register */
    uint8_t *r = out->buffer + starting_len_offset + KEYWRAP_BLOCK_SIZE;

    int n = (int)(openssl_cipher->working_buffer.len / KEYWRAP_BLOCK_SIZE);

    uint8_t b_buf[KEYWRAP_BLOCK_SIZE * 2] = {0};
    struct aws_byte_buf b = aws_byte_buf_from_empty_array(b_buf, sizeof(b_buf));
    int b_out_len = (int)b.capacity;

    uint8_t temp_buf[KEYWRAP_BLOCK_SIZE * 2] = {0};
    struct aws_byte_buf temp_input = aws_byte_buf_from_empty_array(temp_buf, sizeof(temp_buf));

    for (int j = 0; j <= 5; ++j) {
        for (int i = 1; i <= n; ++i) {
            /* concat A and R[i]; A is the most significant half and R[i] the least significant half. */
            memcpy(temp_input.buffer, a, KEYWRAP_BLOCK_SIZE);
            memcpy(temp_input.buffer + KEYWRAP_BLOCK_SIZE, r, KEYWRAP_BLOCK_SIZE);

            /* encrypt the concatenated A and R[i] and store it in B */
            if (!EVP_EncryptUpdate(
                    openssl_cipher->encryptor_ctx,
                    b.buffer,
                    &b_out_len,
                    temp_input.buffer,
                    (int)temp_input.capacity)) {
                cipher->good = false;
                return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
            }

            unsigned char t = (unsigned char)((n * j) + i);
            /* put the 64 MSB ^ T into A */
            memcpy(a, b.buffer, KEYWRAP_BLOCK_SIZE);
            a[7] ^= t;

            /* put the 64 LSB into R[i] */
            memcpy(r, b.buffer + KEYWRAP_BLOCK_SIZE, KEYWRAP_BLOCK_SIZE);
            /* increment i -> R[i] */
            r += KEYWRAP_BLOCK_SIZE;
        }
        /* reset R */
        r = out->buffer + starting_len_offset + KEYWRAP_BLOCK_SIZE;
    }

    return AWS_OP_SUCCESS;
}
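
/* RFC 3394 AES key unwrap: recovers the plaintext key into out and verifies the 0xA6 integrity register. */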
static int s_key_wrap_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    if (openssl_cipher->working_buffer.len < MIN_CEK_LENGTH_BYTES + KEYWRAP_BLOCK_SIZE) {
        cipher->good = false;
        return aws_raise_error(AWS_ERROR_INVALID_STATE);
    }

    /* The following is an in-place implementation of the RFC 3394 unwrap operation, using the
       alternative indexing-based variant: we work in a single buffer rather than copying at the end.
       The one-letter variable names are meant to directly mirror the variables in the RFC. */
    size_t required_buffer_space = openssl_cipher->working_buffer.len - KEYWRAP_BLOCK_SIZE;
    size_t starting_len_offset = out->len;

    if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) {
        return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
    }

    memcpy(
        out->buffer + starting_len_offset,
        openssl_cipher->working_buffer.buffer + KEYWRAP_BLOCK_SIZE,
        required_buffer_space);

    /* the integrity register is the first 8 bytes of the wrapped buffer. */
    uint8_t *a = openssl_cipher->working_buffer.buffer;

    /* the in-place register R holds the plaintext; for decryption we start at the last semiblock
       (8 bytes before the end of the output). */
    uint8_t *r = out->buffer + starting_len_offset + required_buffer_space - KEYWRAP_BLOCK_SIZE;

    int n = (int)(required_buffer_space / KEYWRAP_BLOCK_SIZE);

    uint8_t b_buf[KEYWRAP_BLOCK_SIZE * 10] = {0};
    struct aws_byte_buf b = aws_byte_buf_from_empty_array(b_buf, sizeof(b_buf));
    int b_out_len = (int)b.capacity;

    uint8_t temp_buf[KEYWRAP_BLOCK_SIZE * 2] = {0};
    struct aws_byte_buf temp_input = aws_byte_buf_from_empty_array(temp_buf, sizeof(temp_buf));

    for (int j = 5; j >= 0; --j) {
        for (int i = n; i >= 1; --i) {
            /* concat A ^ T ... */
            memcpy(temp_input.buffer, a, KEYWRAP_BLOCK_SIZE);
            unsigned char t = (unsigned char)((n * j) + i);
            temp_input.buffer[7] ^= t;

            /* ... and R[i] */
            memcpy(temp_input.buffer + KEYWRAP_BLOCK_SIZE, r, KEYWRAP_BLOCK_SIZE);

            /* decrypt the concatenated buffer */
            if (!EVP_DecryptUpdate(
                    openssl_cipher->decryptor_ctx,
                    b.buffer,
                    &b_out_len,
                    temp_input.buffer,
                    (int)temp_input.capacity)) {
                cipher->good = false;
                return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
            }

            /* set A to the 64 MSB of the decrypted result */
            memcpy(a, b.buffer, KEYWRAP_BLOCK_SIZE);
            /* set R[i] to the 64 LSB of the decrypted result */
            memcpy(r, b.buffer + KEYWRAP_BLOCK_SIZE, KEYWRAP_BLOCK_SIZE);
            /* decrement i -> R[i] */
            r -= KEYWRAP_BLOCK_SIZE;
        }
        /* reset R */
        r = out->buffer + starting_len_offset + required_buffer_space - KEYWRAP_BLOCK_SIZE;
    }

    /* here we perform the integrity check to make sure A == 0xA6A6A6A6A6A6A6A6 */
    for (size_t i = 0; i < KEYWRAP_BLOCK_SIZE; ++i) {
        if (a[i] != INTEGRITY_VALUE) {
            cipher->good = false;
            return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED);
        }
    }

    out->len += required_buffer_space;
    return AWS_OP_SUCCESS;
}
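
/* Key wrap is built on raw AES-256-ECB block operations with padding disabled. */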
static int s_init_keywrap_cipher_materials(struct aws_symmetric_cipher *cipher) {
    struct openssl_aes_cipher *openssl_cipher = cipher->impl;

    if (!(EVP_EncryptInit_ex(openssl_cipher->encryptor_ctx, EVP_aes_256_ecb(), NULL, cipher->key.buffer, NULL) &&
          EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) ||
        !(EVP_DecryptInit_ex(openssl_cipher->decryptor_ctx, EVP_aes_256_ecb(), NULL, cipher->key.buffer, NULL) &&
          EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) {
        cipher->good = false;
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    return AWS_OP_SUCCESS;
}

static int s_reset_keywrap_cipher_materials(struct aws_symmetric_cipher *cipher) {
    int ret_val = s_clear_reusable_state(cipher);

    if (ret_val == AWS_OP_SUCCESS) {
        return s_init_keywrap_cipher_materials(cipher);
    }

    return ret_val;
}
static struct aws_symmetric_cipher_vtable s_keywrap_vtable = {
    .alg_name = "AES-KEYWRAP 256",
    .provider = "OpenSSL Compatible LibCrypto",
    .destroy = s_destroy,
    .reset = s_reset_keywrap_cipher_materials,
    .decrypt = s_key_wrap_encrypt_decrypt,
    .encrypt = s_key_wrap_encrypt_decrypt,
    .finalize_decryption = s_key_wrap_finalize_decryption,
    .finalize_encryption = s_key_wrap_finalize_encryption,
};
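
/* Creates an AES-256 key-wrap cipher; note the block size here is the 8-byte key-wrap semiblock,
 * not the 16-byte AES block. */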
struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl(
    struct aws_allocator *allocator,
    const struct aws_byte_cursor *key) {

    struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher));
    cipher->cipher_base.allocator = allocator;
    cipher->cipher_base.block_size = KEYWRAP_BLOCK_SIZE;
    cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN;
    cipher->cipher_base.vtable = &s_keywrap_vtable;
    cipher->cipher_base.impl = cipher;

    /* Copy key into the cipher context. */
    if (key) {
        aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key);
    } else {
        aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN);
        aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key);
    }

    aws_byte_buf_init(&cipher->working_buffer, allocator, KEYWRAP_BLOCK_SIZE);

    /* Initialize the cipher contexts. */
    cipher->encryptor_ctx = EVP_CIPHER_CTX_new();
    AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Encryptor cipher initialization failed!");

    cipher->decryptor_ctx = EVP_CIPHER_CTX_new();
    AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Decryptor cipher initialization failed!");

    /* Initialize the cipher contexts with the specified key (key wrap uses no IV). */
    if (s_init_keywrap_cipher_materials(&cipher->cipher_base)) {
        goto error;
    }

    cipher->cipher_base.good = true;
    return &cipher->cipher_base;

error:
    s_destroy(&cipher->cipher_base);
    return NULL;
}