3882 remove xmod & friends


 *
 * @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
 * @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
 * @author Paulo Barreto <paulo.barreto@terra.com.br>
 *
 * This code is hereby placed in the public domain.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* EXPORT DELETE START */

#if defined(sun4u)
/* External assembly functions: */
extern void aes_encrypt_impl(const uint32_t rk[], int Nr, const uint32_t pt[4],
        uint32_t ct[4]);
extern void aes_decrypt_impl(const uint32_t rk[], int Nr, const uint32_t ct[4],
        uint32_t pt[4]);

#define AES_ENCRYPT_IMPL(a, b, c, d, e) aes_encrypt_impl(a, b, c, d)
#define AES_DECRYPT_IMPL(a, b, c, d, e) aes_decrypt_impl(a, b, c, d)

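/*
 * Editorial note: the two macros above adapt the common five-argument
 * call sites in this file to the four-argument sun4u assembly entry
 * points; the fifth argument (a key-schedule flags word, presumably
 * used by the amd64 path to choose between AES-NI and the plain amd64
 * assembly) is simply dropped.  A sketch of the expansion, using the
 * call site from aes_encrypt_block() below:
 *
 *   AES_ENCRYPT_IMPL(&ksch->encr_ks.ks32[0], ksch->nr,
 *       buffer, buffer, ksch->flags);
 *
 * which on sun4u expands to:
 *
 *   aes_encrypt_impl(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);
 */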
#elif defined(__amd64)

/* These functions are used to execute amd64 instructions for AMD or Intel: */
extern int rijndael_key_setup_enc_amd64(uint32_t rk[],
        const uint32_t cipherKey[], int keyBits);
extern int rijndael_key_setup_dec_amd64(uint32_t rk[],
        const uint32_t cipherKey[], int keyBits);
extern void aes_encrypt_amd64(const uint32_t rk[], int Nr,
        const uint32_t pt[4], uint32_t ct[4]);
extern void aes_decrypt_amd64(const uint32_t rk[], int Nr,
        const uint32_t ct[4], uint32_t pt[4]);


            (Td4[(t3 >>  8) & 0xff] & 0x0000ff00) ^
            (Td4[t2 & 0xff] & 0x000000ff) ^
            rk[1];
        pt[1] = s1;

        s2 = (Td4[t2 >> 24] & 0xff000000) ^
            (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
            (Td4[(t0 >>  8) & 0xff] & 0x0000ff00) ^
            (Td4[t3 & 0xff] & 0x000000ff) ^
            rk[2];
        pt[2] = s2;

        s3 = (Td4[t3 >> 24] & 0xff000000) ^
            (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
            (Td4[(t1 >>  8) & 0xff] & 0x0000ff00) ^
            (Td4[t0 & 0xff] & 0x000000ff) ^
            rk[3];
        pt[3] = s3;
}
#endif  /* sun4u, __amd64 */
/* EXPORT DELETE END */


/*
 * Initialize AES encryption and decryption key schedules.
 *
 * Parameters:
 * cipherKey    User key
 * keyBits      AES key size (128, 192, or 256 bits)
 * keysched     AES key schedule to be initialized, of type aes_key_t.
 *              Allocated by aes_alloc_keysched().
 */
void
aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
{
/* EXPORT DELETE START */
        aes_key_t       *newbie = keysched;
        uint_t          keysize, i, j;
        union {
                uint64_t        ka64[4];
                uint32_t        ka32[8];
        } keyarr;

        switch (keyBits) {
        case 128:
                newbie->nr = 10;
                break;

        case 192:
                newbie->nr = 12;
                break;

        case 256:
                newbie->nr = 14;
                break;

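/*
 * Editorial note: the round counts above follow the AES rule
 * Nr = keyBits/32 + 6, so Nr is 10, 12, or 14 for 128-, 192-, and
 * 256-bit keys, and the expanded schedule holds 4 * (Nr + 1) 32-bit
 * round-key words (44, 52, or 60 words respectively).
 */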


         * SPARCv8/v9 uses a key schedule array with 64-bit elements.
         * X86/AMD64  uses a key schedule array with 32-bit elements.
         */
#ifndef AES_BYTE_SWAP
        if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
                for (i = 0, j = 0; j < keysize; i++, j += 8) {
                        /* LINTED: pointer alignment */
                        keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
                }
        } else {
                bcopy(cipherKey, keyarr.ka32, keysize);
        }

#else   /* byte swap */
        for (i = 0, j = 0; j < keysize; i++, j += 4) {
                keyarr.ka32[i] = htonl(*(uint32_t *)(void *)&cipherKey[j]);
        }
#endif
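/*
 * Editorial note: on a little-endian CPU, htonl() reverses byte order,
 * so the loop above stores each key word big-endian.  Worked example:
 * key bytes { 0x00, 0x01, 0x02, 0x03 } load as the little-endian word
 * 0x03020100, and htonl() turns that into keyarr.ka32[0] == 0x00010203,
 * the word order the byte-swapped (x86 32-bit) path expects.
 */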

        aes_setupkeys(newbie, keyarr.ka32, keyBits);
/* EXPORT DELETE END */
}
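/*
 * Editorial note: IS_P2ALIGNED() above and IS_P2ALIGNED2() below come
 * from <sys/sysmacros.h>.  A sketch of their usual illumos definitions
 * (quoted from memory, so treat it as an assumption, not part of this
 * file):
 *
 *   #define IS_P2ALIGNED(v, a) \
 *           ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
 *   #define IS_P2ALIGNED2(v, w, a) \
 *           ((((uintptr_t)(v) | (uintptr_t)(w)) & ((uintptr_t)(a) - 1)) == 0)
 *
 * i.e. IS_P2ALIGNED2() tests both pointers against the alignment with
 * one mask, which is why a single branch below covers both pt and ct.
 */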


/*
 * Encrypt one block using AES.
 * Align if needed and (for x86 32-bit only) byte-swap.
 *
 * Parameters:
 * ks   Key schedule, of type aes_key_t
 * pt   Input block (plain text)
 * ct   Output block (crypto text).  Can overlap with pt
 */
int
aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
{
/* EXPORT DELETE START */
        aes_key_t       *ksch = (aes_key_t *)ks;

#ifndef AES_BYTE_SWAP
        if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t))) {
                /* LINTED:  pointer alignment */
                AES_ENCRYPT_IMPL(&ksch->encr_ks.ks32[0], ksch->nr,
                    /* LINTED:  pointer alignment */
                    (uint32_t *)pt, (uint32_t *)ct, ksch->flags);
        } else {
#endif
                uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

                /* Copy input block into buffer */
#ifndef AES_BYTE_SWAP
                bcopy(pt, &buffer, AES_BLOCK_LEN);

#else   /* byte swap */
                buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
                buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
                buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
                buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
#endif

                AES_ENCRYPT_IMPL(&ksch->encr_ks.ks32[0], ksch->nr,
                    buffer, buffer, ksch->flags);

                /* Copy result from buffer to output block */
#ifndef AES_BYTE_SWAP
                bcopy(&buffer, ct, AES_BLOCK_LEN);
        }

#else   /* byte swap */
                *(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
                *(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
                *(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
                *(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
#endif
/* EXPORT DELETE END */
        return (CRYPTO_SUCCESS);
}
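/*
 * Editorial note: "Can overlap with pt" presumably means the exact
 * in-place case (pt == ct).  That is safe in both branches above: the
 * bounce-buffer path reads all of pt into buffer[] before writing ct,
 * and in the aligned path an AES core must load the whole input block
 * into registers before it stores any output.  A partial overlap of
 * distinct, aligned pt and ct buffers is not obviously covered by that
 * comment.
 */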


/*
 * Decrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks   Key schedule, of type aes_key_t
 * ct   Input block (crypto text)
 * pt   Output block (plain text).  Can overlap with ct
 */
int
aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
{
/* EXPORT DELETE START */
        aes_key_t       *ksch = (aes_key_t *)ks;

#ifndef AES_BYTE_SWAP
        if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t))) {
                /* LINTED:  pointer alignment */
                AES_DECRYPT_IMPL(&ksch->decr_ks.ks32[0], ksch->nr,
                    /* LINTED:  pointer alignment */
                    (uint32_t *)ct, (uint32_t *)pt, ksch->flags);
        } else {
#endif
                uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

                /* Copy input block into buffer */
#ifndef AES_BYTE_SWAP
                bcopy(ct, &buffer, AES_BLOCK_LEN);

#else   /* byte swap */
                buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
                buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
                buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
                buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
#endif

                AES_DECRYPT_IMPL(&ksch->decr_ks.ks32[0], ksch->nr,
                    buffer, buffer, ksch->flags);

                /* Copy result from buffer to output block */
#ifndef AES_BYTE_SWAP
                bcopy(&buffer, pt, AES_BLOCK_LEN);
        }

#else   /* byte swap */
                *(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
                *(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
                *(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
                *(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
#endif

/* EXPORT DELETE END */
        return (CRYPTO_SUCCESS);
}
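/*
 * Editorial note: the byte-swapped paths use htonl() for the output
 * copy as well as the input copy because htonl() and ntohl() perform
 * the same byte reversal on little-endian x86, so applying it twice is
 * an exact round trip.  Worked example: buffer[0] == 0x00112233 is
 * stored through the cast above as the bytes 0x00 0x11 0x22 0x33 in
 * pt[0..3].
 */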


/*
 * Allocate key schedule for AES.
 *
 * Return the pointer and set size to the number of bytes allocated.
 * Memory allocated must be freed by the caller when done.
 *
 * Parameters:
 * size         Size of key schedule allocated, in bytes
 * kmflag       Flag passed to kmem_alloc(9F); ignored in userland.
 */
/* ARGSUSED */
void *
aes_alloc_keysched(size_t *size, int kmflag)
{
/* EXPORT DELETE START */
        aes_key_t *keysched;

#ifdef  _KERNEL
        keysched = (aes_key_t *)kmem_alloc(sizeof (aes_key_t), kmflag);
#else   /* !_KERNEL */
        keysched = (aes_key_t *)malloc(sizeof (aes_key_t));
#endif  /* _KERNEL */

        if (keysched != NULL) {
                *size = sizeof (aes_key_t);
                return (keysched);
        }
/* EXPORT DELETE END */
        return (NULL);
}
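/*
 * Editorial sketch (not part of the original file): how the three entry
 * points above fit together for one kernel-side block encryption.  The
 * expected ciphertext is the AES-128 known answer from FIPS-197
 * Appendix C.1.  KM_SLEEP, kmem_free(), bcmp(), and the CRYPTO_* codes
 * are standard kernel/crypto facilities assumed to be in scope here.
 */
#if 0   /* illustration only */
static int
aes_known_answer_test(void)
{
        /* FIPS-197 C.1: key 000102...0e0f, plaintext 00112233...eeff */
        static const uint8_t key[16] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
        };
        static const uint8_t pt[16] = {
                0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
                0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
        };
        static const uint8_t expected[16] = {
                0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
                0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a
        };
        uint8_t ct[16];
        size_t size;
        void *ks = aes_alloc_keysched(&size, KM_SLEEP);
        int rv;

        if (ks == NULL)
                return (CRYPTO_HOST_MEMORY);
        aes_init_keysched(key, 128, ks);
        rv = aes_encrypt_block(ks, pt, ct);
        if (rv == CRYPTO_SUCCESS && bcmp(ct, expected, 16) != 0)
                rv = CRYPTO_FAILED;
        kmem_free(ks, size);
        return (rv);
}
#endif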


#ifdef __amd64
/*
 * Return 1 if executing on Intel with AES-NI instructions,
 * otherwise 0 (i.e., Intel without AES-NI or AMD64).
 * Cache the result, as the CPU can't change.
 *
 * Note: the userland version uses getisax().  The kernel version uses
 * global variable x86_featureset.
 */
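/*
 * Editorial sketch (an assumption, quoted from memory of the userland
 * variant, which is elided from this excerpt): the getisax(2) check
 * would look roughly like this, with AV_386_AES from <sys/auxv.h>:
 *
 *      uint_t ui = 0;
 *
 *      (void) getisax(&ui, 1);
 *      cached_result = ((ui & AV_386_AES) != 0);
 */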
static int
intel_aes_instructions_present(void)
{
        static int      cached_result = -1;

        if (cached_result == -1) { /* first time */
#ifdef _KERNEL

