25
26 #include <sys/types.h>
27 #include <sys/systm.h>
28 #include <sys/ddi.h>
29 #include <sys/sysmacros.h>
30 #include <sys/strsun.h>
31 #include <sys/crypto/spi.h>
32 #include <modes/modes.h>
33 #include <sys/crypto/common.h>
34 #include "des_impl.h"
35 #ifndef _KERNEL
36 #include <strings.h>
37 #include <stdlib.h>
38 #endif /* !_KERNEL */
39
40 #if defined(__i386) || defined(__amd64)
41 #include <sys/byteorder.h>
42 #define UNALIGNED_POINTERS_PERMITTED
43 #endif
44
45 /* EXPORT DELETE START */
46
/* Single-DES key schedule: 16 round keys for each direction. */
47 typedef struct keysched_s {
48 uint64_t ksch_encrypt[16];
49 uint64_t ksch_decrypt[16];
50 } keysched_t;
51
/* Triple-DES key schedule: 3 x 16 round keys per direction. */
52 typedef struct keysched3_s {
53 uint64_t ksch_encrypt[48];
54 uint64_t ksch_decrypt[48];
55 } keysched3_t;
56
/* Forces odd parity onto every byte of a 64-bit key (defined below). */
57 static void fix_des_parity(uint64_t *);
58
59 #ifndef sun4u
60
61 static const uint64_t sbox_table[8][64]=
62 {
63 /* BEGIN CSTYLED */
64 {
65 0x0000140140020000ULL, 0x0000000000000000ULL, 0x0000000140000000ULL, 0x0000140140020020ULL,
66 0x0000140140000020ULL, 0x0000000140020020ULL, 0x0000000000000020ULL, 0x0000000140000000ULL,
484 t = sbox_table[0][t >> 58] |
485 sbox_table[1][(t >> 44) & 63] |
486 sbox_table[2][(t >> 38) & 63] |
487 sbox_table[3][(t >> 32) & 63] |
488 sbox_table[4][(t >> 26) & 63] |
489 sbox_table[5][(t >> 15) & 63] |
490 sbox_table[6][(t >> 9) & 63] |
491 sbox_table[7][(t >> 3) & 63];
492 t = t^l;
493 l = r;
494 r = t;
495 }
496 r = l;
497 l = t;
498 }
499
500 return (des_fp(l, r));
501 }
502 #endif /* !sun4u */
503
504 /* EXPORT DELETE END */
505
/*
 * des3_crunch_block -- encrypt (decrypt == B_FALSE) or decrypt
 * (decrypt == B_TRUE) one 8-byte block with triple DES using the
 * three-key schedule passed in `cookie`.  Always returns
 * CRYPTO_SUCCESS.
 *
 * NOTE(review): this listing is elided between the big-endian aligned
 * fast path and the "#endif UNALIGNED_POINTERS_PERMITTED" below; the
 * missing lines presumably close the aligned branch and load the input
 * block into a host-order uint64_t `tmp` -- confirm against the
 * complete source.
 */
506 int
507 des3_crunch_block(const void *cookie, const uint8_t block[DES_BLOCK_LEN],
508 uint8_t out_block[DES_BLOCK_LEN], boolean_t decrypt)
509 {
510 /* EXPORT DELETE START */
511 keysched3_t *ksch = (keysched3_t *)cookie;
512
513 /*
514 * The code below, that is always executed on LITTLE_ENDIAN machines,
515 * reverses bytes in the block. On BIG_ENDIAN, the same code
516 * copies the block without reversing bytes.
517 */
518 #ifdef _BIG_ENDIAN
519 if (IS_P2ALIGNED(block, sizeof (uint64_t)) &&
520 IS_P2ALIGNED(out_block, sizeof (uint64_t))) {
521 if (decrypt == B_TRUE)
522 /* LINTED */
523 *(uint64_t *)out_block = des_crypt_impl(
524 ksch->ksch_decrypt, /* LINTED */
525 *(uint64_t *)block, 3);
526 else
527 /* LINTED */
528 *(uint64_t *)out_block = des_crypt_impl(
529 ksch->ksch_encrypt, /* LINTED */
530 *(uint64_t *)block, 3);
/* [listing elided here: original lines 531-542 not visible] */
543 #endif /* UNALIGNED_POINTERS_PERMITTED */
544
/* `tmp` holds the input block as a uint64_t (set up in elided code). */
545 if (decrypt == B_TRUE)
546 tmp = des_crypt_impl(ksch->ksch_decrypt, tmp, 3);
547 else
548 tmp = des_crypt_impl(ksch->ksch_encrypt, tmp, 3);
549
550 #ifdef UNALIGNED_POINTERS_PERMITTED
551 *(uint64_t *)(void *)&out_block[0] = htonll(tmp);
552 #else
/* Store big-endian, byte-by-byte, when unaligned stores are unsafe. */
553 out_block[0] = tmp >> 56;
554 out_block[1] = tmp >> 48;
555 out_block[2] = tmp >> 40;
556 out_block[3] = tmp >> 32;
557 out_block[4] = tmp >> 24;
558 out_block[5] = tmp >> 16;
559 out_block[6] = tmp >> 8;
560 out_block[7] = (uint8_t)tmp;
561 #endif /* UNALIGNED_POINTERS_PERMITTED */
562 }
563 /* EXPORT DELETE END */
564 return (CRYPTO_SUCCESS);
565 }
566
/*
 * des_crunch_block -- encrypt (decrypt == B_FALSE) or decrypt
 * (decrypt == B_TRUE) one 8-byte block with single DES using the
 * schedule passed in `cookie`.  Always returns CRYPTO_SUCCESS.
 * Mirrors des3_crunch_block but passes round-count 1 to
 * des_crypt_impl.
 *
 * NOTE(review): this listing is elided after the aligned big-endian
 * fast path (original lines 592-605 not visible); the missing lines
 * presumably declare `tmp` and load the input block -- confirm
 * against the complete source.
 */
567 int
568 des_crunch_block(const void *cookie, const uint8_t block[DES_BLOCK_LEN],
569 uint8_t out_block[DES_BLOCK_LEN], boolean_t decrypt)
570 {
571 /* EXPORT DELETE START */
572 keysched_t *ksch = (keysched_t *)cookie;
573
574 /*
575 * The code below, that is always executed on LITTLE_ENDIAN machines,
576 * reverses bytes in the block. On BIG_ENDIAN, the same code
577 * copies the block without reversing bytes.
578 */
579 #ifdef _BIG_ENDIAN
580 if (IS_P2ALIGNED(block, sizeof (uint64_t)) &&
581 IS_P2ALIGNED(out_block, sizeof (uint64_t))) {
582 if (decrypt == B_TRUE)
583 /* LINTED */
584 *(uint64_t *)out_block = des_crypt_impl(
585 ksch->ksch_decrypt, /* LINTED */
586 *(uint64_t *)block, 1);
587 else
588 /* LINTED */
589 *(uint64_t *)out_block = des_crypt_impl(
590 ksch->ksch_encrypt, /* LINTED */
591 *(uint64_t *)block, 1);
606
607
/* `tmp` holds the input block as a uint64_t (set up in elided code). */
608 if (decrypt == B_TRUE)
609 tmp = des_crypt_impl(ksch->ksch_decrypt, tmp, 1);
610 else
611 tmp = des_crypt_impl(ksch->ksch_encrypt, tmp, 1);
612
613 #ifdef UNALIGNED_POINTERS_PERMITTED
614 *(uint64_t *)(void *)&out_block[0] = htonll(tmp);
615 #else
/* Store big-endian, byte-by-byte, when unaligned stores are unsafe. */
616 out_block[0] = tmp >> 56;
617 out_block[1] = tmp >> 48;
618 out_block[2] = tmp >> 40;
619 out_block[3] = tmp >> 32;
620 out_block[4] = tmp >> 24;
621 out_block[5] = tmp >> 16;
622 out_block[6] = tmp >> 8;
623 out_block[7] = (uint8_t)tmp;
624 #endif /* UNALIGNED_POINTERS_PERMITTED */
625 }
626 /* EXPORT DELETE END */
627 return (CRYPTO_SUCCESS);
628 }
629
/*
 * keycheck -- validate a single 8-byte DES key against the table of
 * known weak/semi-weak keys and, when corrected_key is non-NULL,
 * store the key back in big-endian byte order.
 *
 * NOTE(review): this listing is elided between the start of the
 * weak-key table and the corrected_key store (original lines 654-698
 * not visible): the rest of the table, the comparison loop, and the
 * code that sets `key_so_far` are missing here.  Only the B_TRUE
 * return is visible; the weak-key rejection path is presumably in the
 * elided region -- confirm against the complete source.
 */
630 static boolean_t
631 keycheck(uint8_t *key, uint8_t *corrected_key)
632 {
633 /* EXPORT DELETE START */
634 uint64_t key_so_far;
635 uint_t i;
636 /*
637 * Table of weak and semi-weak keys. Fortunately, weak keys are
638 * endian-independent, and some semi-weak keys can be paired up in
639 * endian-opposite order. Since keys are stored as uint64_t's,
640 * use the ifdef _LITTLE_ENDIAN where appropriate.
641 */
642 static uint64_t des_weak_keys[] = {
643 /* Really weak keys. Byte-order independent values. */
644 0x0101010101010101ULL,
645 0x1f1f1f1f0e0e0e0eULL,
646 0xe0e0e0e0f1f1f1f1ULL,
647 0xfefefefefefefefeULL,
648
649 /* Semi-weak (and a few possibly-weak) keys. */
650
651 /* Byte-order independent semi-weak keys. */
652 0x01fe01fe01fe01feULL, 0xfe01fe01fe01fe01ULL,
653
699
700 if (corrected_key != NULL) {
701 #ifdef UNALIGNED_POINTERS_PERMITTED
702 *(uint64_t *)(void *)&corrected_key[0] = htonll(key_so_far);
703 #else
704 /*
705 * The code below reverses the bytes on LITTLE_ENDIAN machines.
706 * On BIG_ENDIAN, the same code copies without reversing
707 * the bytes.
708 */
709 corrected_key[0] = key_so_far >> 56;
710 corrected_key[1] = key_so_far >> 48;
711 corrected_key[2] = key_so_far >> 40;
712 corrected_key[3] = key_so_far >> 32;
713 corrected_key[4] = key_so_far >> 24;
714 corrected_key[5] = key_so_far >> 16;
715 corrected_key[6] = key_so_far >> 8;
716 corrected_key[7] = (uint8_t)key_so_far;
717 #endif /* UNALIGNED_POINTERS_PERMITTED */
718 }
719 /* EXPORT DELETE END */
720 return (B_TRUE);
721 }
722
/*
 * des23_keycheck -- validate a 2-key (des3 == B_FALSE) or 3-key
 * (des3 == B_TRUE) triple-DES key.  Rejects NULL keys and keys whose
 * 1st/2nd or 2nd/3rd 8-byte components are equal; on success,
 * optionally copies the (parity-corrected) key to corrected_key.
 *
 * NOTE(review): this listing is elided inside the per-component loop
 * (original lines 747-763 not visible): the aligned-pointer branch,
 * the loop header over `checks` components, the weak-key counting
 * against `num_weakkeys`, and the computation of `scratch` are
 * missing here -- confirm against the complete source.
 */
723 static boolean_t
724 des23_keycheck(uint8_t *key, uint8_t *corrected_key, boolean_t des3)
725 {
726 /* EXPORT DELETE START */
727 uint64_t aligned_key[DES3_KEYSIZE / sizeof (uint64_t)];
728 uint64_t key_so_far, scratch, *currentkey;
729 uint_t j, num_weakkeys = 0;
730 uint8_t keysize = DES3_KEYSIZE;
731 uint8_t checks = 3;
732
733 if (key == NULL) {
734 return (B_FALSE);
735 }
736
737 if (des3 == B_FALSE) {
738 keysize = DES2_KEYSIZE;
739 checks = 2;
740 }
741
/* Work on an aligned copy if the caller's buffer is misaligned. */
742 if (!IS_P2ALIGNED(key, sizeof (uint64_t))) {
743 bcopy(key, aligned_key, keysize);
744 currentkey = (uint64_t *)aligned_key;
745 } else {
746 /* LINTED */
764 * (since this DES key is weak) and
765 * continue on.
766 */
767 }
768
769 currentkey[j] = scratch;
770 }
771
772 /*
773 * Perform key equivalence checks, now that parity is properly set.
774 * 1st and 2nd keys must be unique, the 3rd key can be the same as
775 * the 1st key for the 2 key variant of 3DES.
776 */
777 if (currentkey[0] == currentkey[1] || currentkey[1] == currentkey[2])
778 return (B_FALSE);
779
780 if (corrected_key != NULL) {
781 bcopy(currentkey, corrected_key, keysize);
782 }
783
784 /* EXPORT DELETE END */
785 return (B_TRUE);
786 }
787
788 boolean_t
789 des_keycheck(uint8_t *key, des_strength_t strength, uint8_t *corrected_key)
790 {
791 if (strength == DES) {
792 return (keycheck(key, corrected_key));
793 } else if (strength == DES2) {
794 return (des23_keycheck(key, corrected_key, B_FALSE));
795 } else if (strength == DES3) {
796 return (des23_keycheck(key, corrected_key, B_TRUE));
797 } else {
798 return (B_FALSE);
799 }
800 }
801
/*
 * des_parity_fix -- set odd parity on each 8-byte component of a
 * DES/DES2/DES3 key and write the result to corrected_key.  `strength`
 * doubles as the number of 8-byte components processed (the while
 * loop runs `strength` times and the final bcopy copies
 * DES_KEYSIZE * strength bytes).
 *
 * NOTE(review): this listing is elided inside the byte-assembly of
 * `key_so_far` (original lines 826-830, bytes 4-7 of the big-endian
 * load, are not visible) -- confirm against the complete source.
 */
802 void
803 des_parity_fix(uint8_t *key, des_strength_t strength, uint8_t *corrected_key)
804 {
805 /* EXPORT DELETE START */
806 uint64_t aligned_key[DES3_KEYSIZE / sizeof (uint64_t)];
807 uint8_t *paritied_key;
808 uint64_t key_so_far;
809 int i = 0, offset = 0;
810
/* Work on an aligned local copy of the caller's key. */
811 if (strength == DES)
812 bcopy(key, aligned_key, DES_KEYSIZE);
813 else
814 bcopy(key, aligned_key, DES3_KEYSIZE);
815
816 paritied_key = (uint8_t *)aligned_key;
817 while (strength > i) {
818 offset = 8 * i;
/* Load this 8-byte component big-endian into key_so_far. */
819 #ifdef UNALIGNED_POINTERS_PERMITTED
820 key_so_far = htonll(*(uint64_t *)(void *)&paritied_key[offset]);
821 #else
822 key_so_far = (((uint64_t)paritied_key[offset + 0] << 56) |
823 ((uint64_t)paritied_key[offset + 1] << 48) |
824 ((uint64_t)paritied_key[offset + 2] << 40) |
825 ((uint64_t)paritied_key[offset + 3] << 32) |
831
832 fix_des_parity(&key_so_far);
833
/* Store the corrected component back, big-endian. */
834 #ifdef UNALIGNED_POINTERS_PERMITTED
835 *(uint64_t *)(void *)&paritied_key[offset] = htonll(key_so_far);
836 #else
837 paritied_key[offset + 0] = key_so_far >> 56;
838 paritied_key[offset + 1] = key_so_far >> 48;
839 paritied_key[offset + 2] = key_so_far >> 40;
840 paritied_key[offset + 3] = key_so_far >> 32;
841 paritied_key[offset + 4] = key_so_far >> 24;
842 paritied_key[offset + 5] = key_so_far >> 16;
843 paritied_key[offset + 6] = key_so_far >> 8;
844 paritied_key[offset + 7] = (uint8_t)key_so_far;
845 #endif /* UNALIGNED_POINTERS_PERMITTED */
846
847 i++;
848 }
849
850 bcopy(paritied_key, corrected_key, DES_KEYSIZE * strength);
851 /* EXPORT DELETE END */
852 }
853
854
855 /*
856 * Initialize key schedule for DES, DES2, and DES3
857 */
/*
 * des_init_keysched -- expand cipherKey into encryption and
 * decryption round-key schedules inside the caller-supplied
 * keysched_t (DES) or keysched3_t (DES2/DES3) pointed to by ks.
 * The decryption schedule is the encryption schedule reversed.
 *
 * NOTE(review): this listing has a large elision (original lines
 * 882-929 not visible): the DES3 case tail, the loading of
 * key_uint64[] from cipherKey, and the first des_ks() expansions are
 * missing here -- confirm against the complete source.  After the
 * elided region, `keysize` has been repurposed as the round-key
 * count per direction (16 for DES, 48 for DES3), as shown by the
 * `keysize = DES3_KEYSIZE` reassignment and `keysize * 16` bcopy
 * sizes below.
 */
858 void
859 des_init_keysched(uint8_t *cipherKey, des_strength_t strength, void *ks)
860 {
861 /* EXPORT DELETE START */
862 uint64_t *encryption_ks;
863 uint64_t *decryption_ks;
864 uint64_t keysched[48];
865 uint64_t key_uint64[3];
866 uint64_t tmp;
867 uint_t keysize, i, j;
868
869 switch (strength) {
870 case DES:
871 keysize = DES_KEYSIZE;
872 encryption_ks = ((keysched_t *)ks)->ksch_encrypt;
873 decryption_ks = ((keysched_t *)ks)->ksch_decrypt;
874 break;
875 case DES2:
876 keysize = DES2_KEYSIZE;
877 encryption_ks = ((keysched3_t *)ks)->ksch_encrypt;
878 decryption_ks = ((keysched3_t *)ks)->ksch_decrypt;
879 break;
880 case DES3:
881 keysize = DES3_KEYSIZE;
/* [listing elided here: original lines 882-929 not visible] */
/* Reverse the middle (second-key) 16 rounds for the EDE order. */
930 tmp = keysched[16+i];
931 keysched[16+i] = keysched[31-i];
932 keysched[31-i] = tmp;
933 }
934 des_ks(keysched+32, key_uint64[2]);
935 keysize = DES3_KEYSIZE;
936 }
937
938 /* save the encryption keyschedule */
939 bcopy(keysched, encryption_ks, keysize * 16);
940
941 /* reverse the key schedule */
942 for (i = 0; i < keysize; i++) {
943 tmp = keysched[i];
944 keysched[i] = keysched[2 * keysize - 1 - i];
945 keysched[2 * keysize -1 -i] = tmp;
946 }
947
948 /* save the decryption keyschedule */
949 bcopy(keysched, decryption_ks, keysize * 16);
950 /* EXPORT DELETE END */
951 }
952
953 /*
954 * Allocate key schedule.
955 */
956 /*ARGSUSED*/
957 void *
958 des_alloc_keysched(size_t *keysched_size, des_strength_t strength, int kmflag)
959 {
960 void *keysched;
961
962 /* EXPORT DELETE START */
963
964 size_t size;
965
966 switch (strength) {
967 case DES:
968 size = sizeof (keysched_t);
969 break;
970 case DES2:
971 case DES3:
972 size = sizeof (keysched3_t);
973 }
974
975 #ifdef _KERNEL
976 keysched = (keysched_t *)kmem_alloc(size, kmflag);
977 #else /* !_KERNEL */
978 keysched = (keysched_t *)malloc(size);
979 #endif /* _KERNEL */
980
981 if (keysched == NULL)
982 return (NULL);
983
984 if (keysched_size != NULL)
985 *keysched_size = size;
986
987 /* EXPORT DELETE END */
988
989 return (keysched);
990 }
991
/*
 * Force odd parity on each byte of a 64-bit DES key: the LSB of every
 * byte is replaced by the complement of the xor of the byte's other
 * 7 bits, so each byte ends up with an odd number of 1 bits.
 */
static void
fix_des_parity(uint64_t *keyp)
{
	/* EXPORT DELETE START */
	uint64_t fold = *keyp;

	/*
	 * Parity-fold: after these shifts, bit 0 of every byte holds the
	 * xor of all 8 bits of that byte (the folds never pull bits from
	 * a neighboring byte into a byte's bit 0).
	 */
	fold ^= fold >> 1;
	fold ^= fold >> 2;
	fold ^= fold >> 4;

	/*
	 * xor-ing each LSB with its byte's whole-byte parity cancels the
	 * old LSB and leaves the xor of the upper 7 bits; complementing
	 * that (folded into the same mask-xor) yields odd parity.
	 */
	*keyp ^= (~fold & 0x0101010101010101ULL);
	/* EXPORT DELETE END */
}
1009
/*
 * des_copy_block -- copy one 8-byte DES block from `in` to `out`.
 * When both pointers are 32-bit aligned, the copy is two word stores;
 * otherwise it falls back to the DES_COPY_BLOCK macro (defined in
 * des_impl.h, not visible here -- presumably a byte-wise copy).
 */
1010 void
1011 des_copy_block(uint8_t *in, uint8_t *out)
1012 {
1013 if (IS_P2ALIGNED(in, sizeof (uint32_t)) &&
1014 IS_P2ALIGNED(out, sizeof (uint32_t))) {
1015 /* LINTED: pointer alignment */
1016 *(uint32_t *)&out[0] = *(uint32_t *)&in[0];
1017 /* LINTED: pointer alignment */
1018 *(uint32_t *)&out[4] = *(uint32_t *)&in[4];
1019 } else {
1020 DES_COPY_BLOCK(in, out);
1021 }
1022 }
1023
1024 /* XOR block of data into dest */
1025 void
1026 des_xor_block(uint8_t *data, uint8_t *dst)
1027 {
|
25
26 #include <sys/types.h>
27 #include <sys/systm.h>
28 #include <sys/ddi.h>
29 #include <sys/sysmacros.h>
30 #include <sys/strsun.h>
31 #include <sys/crypto/spi.h>
32 #include <modes/modes.h>
33 #include <sys/crypto/common.h>
34 #include "des_impl.h"
35 #ifndef _KERNEL
36 #include <strings.h>
37 #include <stdlib.h>
38 #endif /* !_KERNEL */
39
40 #if defined(__i386) || defined(__amd64)
41 #include <sys/byteorder.h>
42 #define UNALIGNED_POINTERS_PERMITTED
43 #endif
44
45 typedef struct keysched_s {
46 uint64_t ksch_encrypt[16];
47 uint64_t ksch_decrypt[16];
48 } keysched_t;
49
50 typedef struct keysched3_s {
51 uint64_t ksch_encrypt[48];
52 uint64_t ksch_decrypt[48];
53 } keysched3_t;
54
55 static void fix_des_parity(uint64_t *);
56
57 #ifndef sun4u
58
59 static const uint64_t sbox_table[8][64]=
60 {
61 /* BEGIN CSTYLED */
62 {
63 0x0000140140020000ULL, 0x0000000000000000ULL, 0x0000000140000000ULL, 0x0000140140020020ULL,
64 0x0000140140000020ULL, 0x0000000140020020ULL, 0x0000000000000020ULL, 0x0000000140000000ULL,
482 t = sbox_table[0][t >> 58] |
483 sbox_table[1][(t >> 44) & 63] |
484 sbox_table[2][(t >> 38) & 63] |
485 sbox_table[3][(t >> 32) & 63] |
486 sbox_table[4][(t >> 26) & 63] |
487 sbox_table[5][(t >> 15) & 63] |
488 sbox_table[6][(t >> 9) & 63] |
489 sbox_table[7][(t >> 3) & 63];
490 t = t^l;
491 l = r;
492 r = t;
493 }
494 r = l;
495 l = t;
496 }
497
498 return (des_fp(l, r));
499 }
500 #endif /* !sun4u */
501
502 int
503 des3_crunch_block(const void *cookie, const uint8_t block[DES_BLOCK_LEN],
504 uint8_t out_block[DES_BLOCK_LEN], boolean_t decrypt)
505 {
506 keysched3_t *ksch = (keysched3_t *)cookie;
507
508 /*
509 * The code below, that is always executed on LITTLE_ENDIAN machines,
510 * reverses bytes in the block. On BIG_ENDIAN, the same code
511 * copies the block without reversing bytes.
512 */
513 #ifdef _BIG_ENDIAN
514 if (IS_P2ALIGNED(block, sizeof (uint64_t)) &&
515 IS_P2ALIGNED(out_block, sizeof (uint64_t))) {
516 if (decrypt == B_TRUE)
517 /* LINTED */
518 *(uint64_t *)out_block = des_crypt_impl(
519 ksch->ksch_decrypt, /* LINTED */
520 *(uint64_t *)block, 3);
521 else
522 /* LINTED */
523 *(uint64_t *)out_block = des_crypt_impl(
524 ksch->ksch_encrypt, /* LINTED */
525 *(uint64_t *)block, 3);
538 #endif /* UNALIGNED_POINTERS_PERMITTED */
539
540 if (decrypt == B_TRUE)
541 tmp = des_crypt_impl(ksch->ksch_decrypt, tmp, 3);
542 else
543 tmp = des_crypt_impl(ksch->ksch_encrypt, tmp, 3);
544
545 #ifdef UNALIGNED_POINTERS_PERMITTED
546 *(uint64_t *)(void *)&out_block[0] = htonll(tmp);
547 #else
548 out_block[0] = tmp >> 56;
549 out_block[1] = tmp >> 48;
550 out_block[2] = tmp >> 40;
551 out_block[3] = tmp >> 32;
552 out_block[4] = tmp >> 24;
553 out_block[5] = tmp >> 16;
554 out_block[6] = tmp >> 8;
555 out_block[7] = (uint8_t)tmp;
556 #endif /* UNALIGNED_POINTERS_PERMITTED */
557 }
558 return (CRYPTO_SUCCESS);
559 }
560
561 int
562 des_crunch_block(const void *cookie, const uint8_t block[DES_BLOCK_LEN],
563 uint8_t out_block[DES_BLOCK_LEN], boolean_t decrypt)
564 {
565 keysched_t *ksch = (keysched_t *)cookie;
566
567 /*
568 * The code below, that is always executed on LITTLE_ENDIAN machines,
569 * reverses bytes in the block. On BIG_ENDIAN, the same code
570 * copies the block without reversing bytes.
571 */
572 #ifdef _BIG_ENDIAN
573 if (IS_P2ALIGNED(block, sizeof (uint64_t)) &&
574 IS_P2ALIGNED(out_block, sizeof (uint64_t))) {
575 if (decrypt == B_TRUE)
576 /* LINTED */
577 *(uint64_t *)out_block = des_crypt_impl(
578 ksch->ksch_decrypt, /* LINTED */
579 *(uint64_t *)block, 1);
580 else
581 /* LINTED */
582 *(uint64_t *)out_block = des_crypt_impl(
583 ksch->ksch_encrypt, /* LINTED */
584 *(uint64_t *)block, 1);
599
600
601 if (decrypt == B_TRUE)
602 tmp = des_crypt_impl(ksch->ksch_decrypt, tmp, 1);
603 else
604 tmp = des_crypt_impl(ksch->ksch_encrypt, tmp, 1);
605
606 #ifdef UNALIGNED_POINTERS_PERMITTED
607 *(uint64_t *)(void *)&out_block[0] = htonll(tmp);
608 #else
609 out_block[0] = tmp >> 56;
610 out_block[1] = tmp >> 48;
611 out_block[2] = tmp >> 40;
612 out_block[3] = tmp >> 32;
613 out_block[4] = tmp >> 24;
614 out_block[5] = tmp >> 16;
615 out_block[6] = tmp >> 8;
616 out_block[7] = (uint8_t)tmp;
617 #endif /* UNALIGNED_POINTERS_PERMITTED */
618 }
619 return (CRYPTO_SUCCESS);
620 }
621
622 static boolean_t
623 keycheck(uint8_t *key, uint8_t *corrected_key)
624 {
625 uint64_t key_so_far;
626 uint_t i;
627 /*
628 * Table of weak and semi-weak keys. Fortunately, weak keys are
629 * endian-independent, and some semi-weak keys can be paired up in
630 * endian-opposite order. Since keys are stored as uint64_t's,
631 * use the ifdef _LITTLE_ENDIAN where appropriate.
632 */
633 static uint64_t des_weak_keys[] = {
634 /* Really weak keys. Byte-order independent values. */
635 0x0101010101010101ULL,
636 0x1f1f1f1f0e0e0e0eULL,
637 0xe0e0e0e0f1f1f1f1ULL,
638 0xfefefefefefefefeULL,
639
640 /* Semi-weak (and a few possibly-weak) keys. */
641
642 /* Byte-order independent semi-weak keys. */
643 0x01fe01fe01fe01feULL, 0xfe01fe01fe01fe01ULL,
644
690
691 if (corrected_key != NULL) {
692 #ifdef UNALIGNED_POINTERS_PERMITTED
693 *(uint64_t *)(void *)&corrected_key[0] = htonll(key_so_far);
694 #else
695 /*
696 * The code below reverses the bytes on LITTLE_ENDIAN machines.
697 * On BIG_ENDIAN, the same code copies without reversing
698 * the bytes.
699 */
700 corrected_key[0] = key_so_far >> 56;
701 corrected_key[1] = key_so_far >> 48;
702 corrected_key[2] = key_so_far >> 40;
703 corrected_key[3] = key_so_far >> 32;
704 corrected_key[4] = key_so_far >> 24;
705 corrected_key[5] = key_so_far >> 16;
706 corrected_key[6] = key_so_far >> 8;
707 corrected_key[7] = (uint8_t)key_so_far;
708 #endif /* UNALIGNED_POINTERS_PERMITTED */
709 }
710 return (B_TRUE);
711 }
712
713 static boolean_t
714 des23_keycheck(uint8_t *key, uint8_t *corrected_key, boolean_t des3)
715 {
716 uint64_t aligned_key[DES3_KEYSIZE / sizeof (uint64_t)];
717 uint64_t key_so_far, scratch, *currentkey;
718 uint_t j, num_weakkeys = 0;
719 uint8_t keysize = DES3_KEYSIZE;
720 uint8_t checks = 3;
721
722 if (key == NULL) {
723 return (B_FALSE);
724 }
725
726 if (des3 == B_FALSE) {
727 keysize = DES2_KEYSIZE;
728 checks = 2;
729 }
730
731 if (!IS_P2ALIGNED(key, sizeof (uint64_t))) {
732 bcopy(key, aligned_key, keysize);
733 currentkey = (uint64_t *)aligned_key;
734 } else {
735 /* LINTED */
753 * (since this DES key is weak) and
754 * continue on.
755 */
756 }
757
758 currentkey[j] = scratch;
759 }
760
761 /*
762 * Perform key equivalence checks, now that parity is properly set.
763 * 1st and 2nd keys must be unique, the 3rd key can be the same as
764 * the 1st key for the 2 key variant of 3DES.
765 */
766 if (currentkey[0] == currentkey[1] || currentkey[1] == currentkey[2])
767 return (B_FALSE);
768
769 if (corrected_key != NULL) {
770 bcopy(currentkey, corrected_key, keysize);
771 }
772
773 return (B_TRUE);
774 }
775
776 boolean_t
777 des_keycheck(uint8_t *key, des_strength_t strength, uint8_t *corrected_key)
778 {
779 if (strength == DES) {
780 return (keycheck(key, corrected_key));
781 } else if (strength == DES2) {
782 return (des23_keycheck(key, corrected_key, B_FALSE));
783 } else if (strength == DES3) {
784 return (des23_keycheck(key, corrected_key, B_TRUE));
785 } else {
786 return (B_FALSE);
787 }
788 }
789
790 void
791 des_parity_fix(uint8_t *key, des_strength_t strength, uint8_t *corrected_key)
792 {
793 uint64_t aligned_key[DES3_KEYSIZE / sizeof (uint64_t)];
794 uint8_t *paritied_key;
795 uint64_t key_so_far;
796 int i = 0, offset = 0;
797
798 if (strength == DES)
799 bcopy(key, aligned_key, DES_KEYSIZE);
800 else
801 bcopy(key, aligned_key, DES3_KEYSIZE);
802
803 paritied_key = (uint8_t *)aligned_key;
804 while (strength > i) {
805 offset = 8 * i;
806 #ifdef UNALIGNED_POINTERS_PERMITTED
807 key_so_far = htonll(*(uint64_t *)(void *)&paritied_key[offset]);
808 #else
809 key_so_far = (((uint64_t)paritied_key[offset + 0] << 56) |
810 ((uint64_t)paritied_key[offset + 1] << 48) |
811 ((uint64_t)paritied_key[offset + 2] << 40) |
812 ((uint64_t)paritied_key[offset + 3] << 32) |
818
819 fix_des_parity(&key_so_far);
820
821 #ifdef UNALIGNED_POINTERS_PERMITTED
822 *(uint64_t *)(void *)&paritied_key[offset] = htonll(key_so_far);
823 #else
824 paritied_key[offset + 0] = key_so_far >> 56;
825 paritied_key[offset + 1] = key_so_far >> 48;
826 paritied_key[offset + 2] = key_so_far >> 40;
827 paritied_key[offset + 3] = key_so_far >> 32;
828 paritied_key[offset + 4] = key_so_far >> 24;
829 paritied_key[offset + 5] = key_so_far >> 16;
830 paritied_key[offset + 6] = key_so_far >> 8;
831 paritied_key[offset + 7] = (uint8_t)key_so_far;
832 #endif /* UNALIGNED_POINTERS_PERMITTED */
833
834 i++;
835 }
836
837 bcopy(paritied_key, corrected_key, DES_KEYSIZE * strength);
838 }
839
840
841 /*
842 * Initialize key schedule for DES, DES2, and DES3
843 */
844 void
845 des_init_keysched(uint8_t *cipherKey, des_strength_t strength, void *ks)
846 {
847 uint64_t *encryption_ks;
848 uint64_t *decryption_ks;
849 uint64_t keysched[48];
850 uint64_t key_uint64[3];
851 uint64_t tmp;
852 uint_t keysize, i, j;
853
854 switch (strength) {
855 case DES:
856 keysize = DES_KEYSIZE;
857 encryption_ks = ((keysched_t *)ks)->ksch_encrypt;
858 decryption_ks = ((keysched_t *)ks)->ksch_decrypt;
859 break;
860 case DES2:
861 keysize = DES2_KEYSIZE;
862 encryption_ks = ((keysched3_t *)ks)->ksch_encrypt;
863 decryption_ks = ((keysched3_t *)ks)->ksch_decrypt;
864 break;
865 case DES3:
866 keysize = DES3_KEYSIZE;
915 tmp = keysched[16+i];
916 keysched[16+i] = keysched[31-i];
917 keysched[31-i] = tmp;
918 }
919 des_ks(keysched+32, key_uint64[2]);
920 keysize = DES3_KEYSIZE;
921 }
922
923 /* save the encryption keyschedule */
924 bcopy(keysched, encryption_ks, keysize * 16);
925
926 /* reverse the key schedule */
927 for (i = 0; i < keysize; i++) {
928 tmp = keysched[i];
929 keysched[i] = keysched[2 * keysize - 1 - i];
930 keysched[2 * keysize -1 -i] = tmp;
931 }
932
933 /* save the decryption keyschedule */
934 bcopy(keysched, decryption_ks, keysize * 16);
935 }
936
937 /*
938 * Allocate key schedule.
939 */
940 /*ARGSUSED*/
941 void *
942 des_alloc_keysched(size_t *keysched_size, des_strength_t strength, int kmflag)
943 {
944 void *keysched;
945
946 size_t size;
947
948 switch (strength) {
949 case DES:
950 size = sizeof (keysched_t);
951 break;
952 case DES2:
953 case DES3:
954 size = sizeof (keysched3_t);
955 }
956
957 #ifdef _KERNEL
958 keysched = (keysched_t *)kmem_alloc(size, kmflag);
959 #else /* !_KERNEL */
960 keysched = (keysched_t *)malloc(size);
961 #endif /* _KERNEL */
962
963 if (keysched == NULL)
964 return (NULL);
965
966 if (keysched_size != NULL)
967 *keysched_size = size;
968
969 return (keysched);
970 }
971
/*
 * Rewrite the LSB of every byte of *keyp so that each byte carries
 * odd parity, as DES keys require.  Equivalent to xor-ing each LSB
 * with the byte's other 7 bits and then inverting it.
 */
static void
fix_des_parity(uint64_t *keyp)
{
	uint64_t key = *keyp;

	/*
	 * Fold all bits of every byte into its bit 0; the shift order
	 * (4, 2, 1) is immaterial since xor is associative and the
	 * folds never mix another byte's bits into a byte's bit 0.
	 */
	key ^= key >> 4;
	key ^= key >> 2;
	key ^= key >> 1;

	/* Cancel the old LSBs, then invert them to obtain odd parity. */
	*keyp ^= key & 0x0101010101010101ULL;
	*keyp ^= 0x0101010101010101ULL;
}
987
/*
 * des_copy_block -- copy one 8-byte DES block from `in` to `out`.
 * When both pointers are 32-bit aligned, the copy is two word stores;
 * otherwise it falls back to the DES_COPY_BLOCK macro (defined in
 * des_impl.h, not visible here -- presumably a byte-wise copy).
 */
988 void
989 des_copy_block(uint8_t *in, uint8_t *out)
990 {
991 if (IS_P2ALIGNED(in, sizeof (uint32_t)) &&
992 IS_P2ALIGNED(out, sizeof (uint32_t))) {
993 /* LINTED: pointer alignment */
994 *(uint32_t *)&out[0] = *(uint32_t *)&in[0];
995 /* LINTED: pointer alignment */
996 *(uint32_t *)&out[4] = *(uint32_t *)&in[4];
997 } else {
998 DES_COPY_BLOCK(in, out);
999 }
1000 }
1001
1002 /* XOR block of data into dest */
1003 void
1004 des_xor_block(uint8_t *data, uint8_t *dst)
1005 {
|