code stringlengths 1 1.05M | repo_name stringlengths 6 83 | path stringlengths 3 242 | language stringclasses 222 values | license stringclasses 20 values | size int64 1 1.05M |
|---|---|---|---|---|---|
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_SAL_TIME
#ifdef HITLS_BSL_ERR
#include "bsl_err_internal.h"
#endif
#include "bsl_sal.h"
#include "sal_timeimpl.h"
#include "bsl_errno.h"
#include "sal_time.h"
/* Global callback table; unset slots (NULL) fall back to the built-in implementations. */
static BSL_SAL_TimeCallback g_timeCallback = {0};
/*
 * Install one time-related callback selected by 'type'.
 * Returns BSL_SAL_TIME_NO_REG_FUNC when 'type' is outside the supported range,
 * BSL_SUCCESS otherwise.
 * NOTE(review): the slot is located by viewing BSL_SAL_TimeCallback as an array of
 * void*, assuming the enum values from BSL_SAL_TIME_GET_UTC_TIME_CB_FUNC through
 * BSL_SAL_TIME_TICK_PER_SEC_CB_FUNC are contiguous and ordered exactly like the
 * struct fields in sal_timeimpl.h -- confirm before reordering either side.
 */
int32_t SAL_TimeCallback_Ctrl(BSL_SAL_CB_FUNC_TYPE type, void *funcCb)
{
    if (type > BSL_SAL_TIME_TICK_PER_SEC_CB_FUNC || type < BSL_SAL_TIME_GET_UTC_TIME_CB_FUNC) {
        return BSL_SAL_TIME_NO_REG_FUNC;
    }
    /* Index of the function-pointer slot relative to the first registrable type. */
    uint32_t offset = (uint32_t)(type - BSL_SAL_TIME_GET_UTC_TIME_CB_FUNC);
    ((void **)&g_timeCallback)[offset] = funcCb;
    return BSL_SUCCESS;
}
/* Register the system-time callback; a NULL argument leaves the current callback untouched. */
void BSL_SAL_SysTimeFuncReg(BslTimeFunc func)
{
    if (func == NULL) {
        return;
    }
    g_timeCallback.pfGetSysTime = func;
}
/* Deregister the system-time callback so the built-in implementation is used again. */
void BSL_SysTimeFuncUnReg(void)
{
    g_timeCallback.pfGetSysTime = NULL;
}
/* A year is a leap year when it is divisible by 400, or by 4 but not by 100. */
bool BSL_IsLeapYear(uint32_t year)
{
    if ((year % 400U) == 0U) {
        return true;
    }
    return ((year % 4U) == 0U) && ((year % 100U) != 0U);
}
/*
 * Convert a calendar date into seconds elapsed since 00:00:00 of the epoch year
 * (BSL_TIME_SYSTEM_EPOCH_YEAR -- presumably 1970, confirm in sal_time.h).
 * The caller must pre-validate inputTime (see BslUtcTimeGet): out-of-range month
 * values would index monthTable out of bounds.
 */
static int64_t BslMkTime64Get(const BSL_TIME *inputTime)
{
    int64_t result;
    uint32_t i;
    int32_t unixYear;
    int32_t unixDay;
    int32_t extraDay = 0; /* leap days contributed by complete years before the target year */
    int32_t year = inputTime->year;
    int32_t month = inputTime->month - 1; /* 1-based month -> 0-based table index */
    int32_t day = inputTime->day;
    int32_t hour = inputTime->hour;
    int32_t minute = inputTime->minute;
    int32_t second = inputTime->second;
    /* Cumulative days before each month of a non-leap year (index 12 = full year). */
    int32_t monthTable[13] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365};
    /* One extra day for every leap year strictly before the target year. */
    for (i = BSL_TIME_SYSTEM_EPOCH_YEAR; (int32_t)i < year; i++) {
        if (BSL_IsLeapYear(i) == true) {
            extraDay++;
        }
    }
    unixYear = year - (int32_t)BSL_TIME_SYSTEM_EPOCH_YEAR;
    /* In a leap year, cumulative counts after February shift by one day
     * (assumes BSL_MONTH_FEB == 2 so indices 2..11 are adjusted -- TODO confirm). */
    if (BSL_IsLeapYear((uint32_t)year) == true) {
        for (i = BSL_MONTH_FEB; i < BSL_MONTH_DEC; i++) {
            monthTable[i] = monthTable[i] + 1;
        }
    }
    unixDay = (unixYear * (int32_t)BSL_TIME_DAY_PER_NONLEAP_YEAR) + monthTable[month] + (day - 1) + extraDay;
    result = unixDay * (int64_t)86400; /* 86400 is the number of seconds in a day */
    result = (hour * (int64_t)3600) + result; /* 3600 is the number of seconds in a hour */
    result = (minute * (int64_t)60) + second + result; /* 60 is the number of seconds in a minute */
    return result;
}
/**
* @brief Convert the given date structure to the number of seconds since January 1,1970
* @param inputTime [IN] Pointer to the date to be converted.
* @param utcTime [OUT] Pointer to the storage of the conversion result
* @return BSL_SUCCESS successfully executed.
* BSL_INTERNAL_EXCEPTION Execution Failure
*/
static uint32_t BslUtcTimeGet(const BSL_TIME *inputTime, int64_t *utcTime)
{
    if ((inputTime == NULL) || (utcTime == NULL)) {
        return BSL_INTERNAL_EXCEPTION;
    }
    if (BSL_DateTimeCheck(inputTime) == false) {
        return BSL_INTERNAL_EXCEPTION;
    }
    /* Negative seconds indicate a date before the epoch: report failure. */
    int64_t seconds = BslMkTime64Get(inputTime);
    if (seconds < 0) {
        *utcTime = -1;
        return BSL_INTERNAL_EXCEPTION;
    }
    *utcTime = seconds;
    return BSL_SUCCESS;
}
#define BSL_TIMESTR_MINLEN 26
uint32_t BSL_DateToStrConvert(const BSL_TIME *dateTime, char *timeStr, size_t len)
{
if (dateTime == NULL || timeStr == NULL || len < BSL_TIMESTR_MINLEN) {
return BSL_INTERNAL_EXCEPTION;
}
if (BSL_DateTimeCheck(dateTime) != true) {
return BSL_INTERNAL_EXCEPTION;
}
if (g_timeCallback.pfDateToStrConvert != NULL && g_timeCallback.pfDateToStrConvert != BSL_DateToStrConvert) {
return g_timeCallback.pfDateToStrConvert(dateTime, timeStr, len);
}
#ifdef HITLS_BSL_SAL_LINUX
return TIME_DateToStrConvert(dateTime, timeStr, len);
#else
return BSL_SAL_TIME_NO_REG_FUNC;
#endif
}
int64_t BSL_SAL_CurrentSysTimeGet(void)
{
if (g_timeCallback.pfGetSysTime != NULL && g_timeCallback.pfGetSysTime != BSL_SAL_CurrentSysTimeGet) {
return g_timeCallback.pfGetSysTime();
}
#ifdef HITLS_BSL_SAL_LINUX
return TIME_GetSysTime();
#else
BSL_ERR_PUSH_ERROR(BSL_SAL_TIME_NO_REG_FUNC);
return 0;
#endif
}
/* Validate both dates and convert them to epoch seconds for comparison. */
static uint32_t BslDateTimeCmpCheck(const BSL_TIME *dateA, int64_t *utcTimeA,
    const BSL_TIME *dateB, int64_t *utcTimeB)
{
    if (dateA == NULL || dateB == NULL) {
        return BSL_INTERNAL_EXCEPTION;
    }
    if ((BslUtcTimeGet(dateA, utcTimeA) != BSL_SUCCESS) ||
        (BslUtcTimeGet(dateB, utcTimeB) != BSL_SUCCESS)) {
        return BSL_INTERNAL_EXCEPTION;
    }
    return BSL_SUCCESS;
}
/*
 * Compare two dates at one-second granularity.
 * diffSec (optional) receives dateA - dateB in seconds.
 * Returns BSL_TIME_DATE_BEFORE / BSL_TIME_DATE_AFTER / BSL_TIME_CMP_EQUAL,
 * or BSL_TIME_CMP_ERROR when either date is invalid or NULL.
 */
int32_t BSL_SAL_DateTimeCompare(const BSL_TIME *dateA, const BSL_TIME *dateB, int64_t *diffSec)
{
    int64_t secA = 0;
    int64_t secB = 0;
    if (BslDateTimeCmpCheck(dateA, &secA, dateB, &secB) != BSL_SUCCESS) {
        return (int32_t)BSL_TIME_CMP_ERROR;
    }
    int64_t delta = secA - secB;
    if (diffSec != NULL) {
        *diffSec = delta;
    }
    if (delta < 0) {
        return (int32_t)BSL_TIME_DATE_BEFORE;
    }
    if (delta > 0) {
        return (int32_t)BSL_TIME_DATE_AFTER;
    }
    return (int32_t)BSL_TIME_CMP_EQUAL;
}
/* Three-way compare of two unsigned values using the date-comparison result codes. */
static uint32_t TimeCmp(uint32_t a, uint32_t b)
{
    if (a == b) {
        return BSL_TIME_CMP_EQUAL;
    }
    return (a > b) ? BSL_TIME_DATE_AFTER : BSL_TIME_DATE_BEFORE;
}
/* Compare two dates down to microsecond granularity: seconds, then ms, then us. */
int32_t BSL_SAL_DateTimeCompareByUs(const BSL_TIME *dateA, const BSL_TIME *dateB)
{
    int64_t unusedDiff = 0;
    uint32_t order = (uint32_t)BSL_SAL_DateTimeCompare(dateA, dateB, &unusedDiff);
    if (order != BSL_TIME_CMP_EQUAL) {
        /* Covers BEFORE/AFTER and the error case (which also guards the derefs below). */
        return (int32_t)order;
    }
    order = TimeCmp(dateA->millSec, dateB->millSec);
    if (order == BSL_TIME_CMP_EQUAL) {
        order = TimeCmp(dateA->microSec, dateB->microSec);
    }
    return (int32_t)order;
}
/*
 * Add 'us' microseconds to dateA and write the resulting date to dateR.
 * Returns BSL_SUCCESS, or the failure code of the underlying conversions.
 * NOTE(review): 'us + dateA->microSec' is 32-bit arithmetic; for us values near
 * UINT32_MAX the sum can wrap -- confirm callers bound 'us'.
 */
uint32_t BSL_DateTimeAddUs(BSL_TIME *dateR, const BSL_TIME *dateA, uint32_t us)
{
    uint32_t ret;
    int64_t utcTime = 0;
    /* Convert the date into seconds. */
    ret = BSL_SAL_DateToUtcTimeConvert(dateA, &utcTime);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    /* Convert the increased time to seconds */
    uint32_t microSec = us + dateA->microSec;
    uint32_t millSec = (microSec / BSL_SECOND_TRANSFER_RATIO) + dateA->millSec;
    microSec %= BSL_SECOND_TRANSFER_RATIO;
    uint32_t second = millSec / BSL_SECOND_TRANSFER_RATIO;
    millSec %= BSL_SECOND_TRANSFER_RATIO;
    /* Convert to the date after the number of seconds is added */
    utcTime += (int64_t)second;
    ret = BSL_SAL_UtcTimeToDateConvert(utcTime, dateR);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    /* Complete milliseconds and microseconds. */
    dateR->millSec = (uint16_t)millSec;
    dateR->microSec = microSec;
    return BSL_SUCCESS;
}
/* Convert a date to seconds since the epoch; rejects NULL or invalid dates. */
int32_t BSL_SAL_DateToUtcTimeConvert(const BSL_TIME *dateTime, int64_t *utcTime)
{
    if ((dateTime == NULL) || (utcTime == NULL)) {
        return BSL_INTERNAL_EXCEPTION;
    }
    if (BSL_DateTimeCheck(dateTime) != true) {
        return BSL_INTERNAL_EXCEPTION;
    }
    return (int32_t)BslUtcTimeGet(dateTime, utcTime);
}
/* February is valid up to 29 days in a leap year, 28 otherwise. */
static bool BslFebDayValidCheck(uint16_t year, uint8_t day)
{
    uint32_t maxDay = (BSL_IsLeapYear(year) == true) ?
        BSL_TIME_LEAP_FEBRUARY_DAY : BSL_TIME_NOLEAP_FEBRUARY_DAY;
    return (uint32_t)day <= maxDay;
}
/* Validate the day-of-month against the month's maximum (February is year-dependent). */
static bool BslDayValidCheck(uint16_t year, uint8_t month, uint8_t day)
{
    uint8_t maxDay;
    switch (month) {
        case BSL_MONTH_FEB:
            return BslFebDayValidCheck(year, day);
        case BSL_MONTH_APR:
        case BSL_MONTH_JUN:
        case BSL_MONTH_SEM:
        case BSL_MONTH_NOV:
            maxDay = BSL_TIME_SMALL_MONTH_DAY;
            break;
        case BSL_MONTH_JAN:
        case BSL_MONTH_MAR:
        case BSL_MONTH_MAY:
        case BSL_MONTH_JUL:
        case BSL_MONTH_AUG:
        case BSL_MONTH_OCT:
        case BSL_MONTH_DEC:
            maxDay = BSL_TIME_BIG_MONTH_DAY;
            break;
        default:
            return false;
    }
    return day <= maxDay;
}
/*
 * Validate the year/month/day fields: year must not precede the epoch year,
 * month must be within [JAN, DEC], and the day must be a valid 1-based
 * day-of-month for that month/year.
 */
static bool BslYearMonthDayCheck(const BSL_TIME *dateTime)
{
    if (dateTime->year < BSL_TIME_SYSTEM_EPOCH_YEAR) {
        return false;
    }
    if ((dateTime->month < BSL_MONTH_JAN) || (dateTime->month > BSL_MONTH_DEC)) {
        return false;
    }
    /* Fix: the lower bound of 'day' was compared against the month constant
     * BSL_MONTH_JAN, which only worked because that constant presumably equals 1.
     * Days are 1-based, so use the literal. */
    if (dateTime->day < 1U) {
        return false;
    }
    return BslDayValidCheck(dateTime->year, dateTime->month, dateTime->day);
}
/* Validate the time-of-day fields: hour <= 23, minute/second <= 59, ms/us <= 999. */
static bool BslHourMinSecCheck(const BSL_TIME *dateTime)
{
    return (dateTime->hour <= 23U) &&
        (dateTime->minute <= 59U) &&
        (dateTime->second <= 59U) &&
        (dateTime->millSec <= 999U) &&
        (dateTime->microSec <= 999U); /* microseconds does not exceed the maximum value 1000 */
}
/*
 * Validate all fields of a date. Returns true only when both the calendar part
 * (year/month/day) and the time-of-day part are within range.
 * Fix: this is a public API, yet the original dereferenced 'dateTime' without a
 * NULL guard (every other public function in this file checks its pointers first).
 */
bool BSL_DateTimeCheck(const BSL_TIME *dateTime)
{
    if (dateTime == NULL) {
        return false;
    }
    return (BslYearMonthDayCheck(dateTime) == true) && (BslHourMinSecCheck(dateTime) == true);
}
/* Convert epoch seconds to a date structure, via callback when one is registered. */
int32_t BSL_SAL_UtcTimeToDateConvert(int64_t utcTime, BSL_TIME *sysTime)
{
    if ((sysTime == NULL) || (utcTime > BSL_UTCTIME_MAX)) {
        return BSL_SAL_ERR_BAD_PARAM;
    }
    BslSalUtcTimeToDateConvert cb = g_timeCallback.pfUtcTimeToDateConvert;
    if ((cb != NULL) && (cb != (BslSalUtcTimeToDateConvert)BSL_SAL_UtcTimeToDateConvert)) {
        return cb(utcTime, sysTime);
    }
#ifdef HITLS_BSL_SAL_LINUX
    return TIME_UtcTimeToDateConvert(utcTime, sysTime);
#else
    return BSL_SAL_TIME_NO_REG_FUNC;
#endif
}
/* Fill 'sysTime' with the current system date, via callback when one is registered. */
int32_t BSL_SAL_SysTimeGet(BSL_TIME *sysTime)
{
    if (sysTime == NULL) {
        return BSL_SAL_ERR_BAD_PARAM;
    }
    BslSalSysTimeGet cb = g_timeCallback.pfSysTimeGet;
    if ((cb != NULL) && (cb != (BslSalSysTimeGet)BSL_SAL_SysTimeGet)) {
        return cb(sysTime);
    }
#ifdef HITLS_BSL_SAL_LINUX
    return TIME_SysTimeGet(sysTime);
#else
    return BSL_SAL_TIME_NO_REG_FUNC;
#endif
}
void BSL_SAL_Sleep(uint32_t time)
{
if (g_timeCallback.pfSleep != NULL && g_timeCallback.pfSleep != BSL_SAL_Sleep) {
g_timeCallback.pfSleep(time);
return;
}
#ifdef HITLS_BSL_SAL_LINUX
SAL_Sleep(time);
#endif
}
long BSL_SAL_Tick(void)
{
if (g_timeCallback.pfTick != NULL && g_timeCallback.pfTick != BSL_SAL_Tick) {
return g_timeCallback.pfTick();
}
#ifdef HITLS_BSL_SAL_LINUX
return SAL_Tick();
#else
return BSL_SAL_TIME_NO_REG_FUNC;
#endif
}
long BSL_SAL_TicksPerSec(void)
{
if (g_timeCallback.pfTicksPerSec != NULL && g_timeCallback.pfTicksPerSec != BSL_SAL_TicksPerSec) {
return g_timeCallback.pfTicksPerSec();
}
#ifdef HITLS_BSL_SAL_LINUX
return SAL_TicksPerSec();
#else
return BSL_SAL_TIME_NO_REG_FUNC;
#endif
}
#endif /* HITLS_BSL_SAL_TIME */
| 2302_82127028/openHiTLS-examples_1508 | bsl/sal/src/sal_time.c | C | unknown | 11,851 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef SAL_TIMEIMPL_H
#define SAL_TIMEIMPL_H
#include "hitls_build.h"
#ifdef HITLS_BSL_SAL_TIME
#include <stdint.h>
#include "bsl_sal.h"
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
/*
 * User-registerable time callbacks. Field order must match the enum range used by
 * SAL_TimeCallback_Ctrl, which writes slots by index -- keep them in sync.
 */
typedef struct {
    BslSalGetSysTime pfGetSysTime;                 /* current time in seconds */
    BslSalDateToStrConvert pfDateToStrConvert;     /* format a date into a string */
    BslSalSysTimeGet pfSysTimeGet;                 /* fill a BSL_TIME with the current date */
    BslSalUtcTimeToDateConvert pfUtcTimeToDateConvert; /* epoch seconds -> BSL_TIME */
    BslSalSleep pfSleep;                           /* suspend the calling thread */
    BslSalTick pfTick;                             /* current tick counter */
    BslSalTicksPerSec pfTicksPerSec;               /* tick frequency */
} BSL_SAL_TimeCallback;
/* Register one callback slot selected by 'type'; see sal_time.c. */
int32_t SAL_TimeCallback_Ctrl(BSL_SAL_CB_FUNC_TYPE type, void *funcCb);
#ifdef HITLS_BSL_SAL_LINUX
int64_t TIME_GetSysTime(void);
uint32_t TIME_DateToStrConvert(const BSL_TIME *dateTime, char *timeStr, size_t len);
uint32_t TIME_SysTimeGet(BSL_TIME *sysTime);
uint32_t TIME_UtcTimeToDateConvert(int64_t utcTime, BSL_TIME *sysTime);
void SAL_Sleep(uint32_t time);
long SAL_Tick(void);
long SAL_TicksPerSec(void);
#endif
#ifdef __cplusplus
}
#endif // __cplusplus
#endif // HITLS_BSL_SAL_TIME
#endif // SAL_TIMEIMPL_H
| 2302_82127028/openHiTLS-examples_1508 | bsl/sal/src/sal_timeimpl.h | C | unknown | 1,559 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef TLV_H
#define TLV_H
#include "hitls_build.h"
#ifdef HITLS_BSL_TLV
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define TLV_HEADER_LENGTH (sizeof(uint32_t) + sizeof(uint32_t))
/* One TLV element: fixed 32-bit type and length, plus a caller-owned value buffer. */
typedef struct {
    uint32_t type;    /* TLV type identifier */
    uint32_t length;  /* number of bytes in 'value' */
    uint8_t *value;   /* value bytes; ownership stays with the caller */
} BSL_Tlv;
/**
* @ingroup bsl_tlv
* @brief Construct a TLV message based on the TLV structure.
*
* @param tlv [IN] TLV structure
* @param buffer [OUT] Message memory
* @param bufLen [IN] Memory length
* @param usedLen [OUT] Message length
*
* @retval BSL_SUCCESS successfully created.
* @retval BSL_TLV_ERR_BAD_PARAM Parameter incorrect
* @retval BSL_MEMCPY_FAIL Memory Copy Failure
*/
int32_t BSL_TLV_Pack(const BSL_Tlv *tlv, uint8_t *buffer, uint32_t bufLen, uint32_t *usedLen);
/**
* @ingroup bsl_tlv
* @brief Parse the TLV message of the specified type and generate the TLV structure.
*
* @param wantType [IN] TLV type
* @param data [IN] TLV message memory
* @param dataLen [IN] Message length
* @param tlv [OUT] TLV Structure
* @param readLen [OUT] Length of the parsed message
*
* @retval BSL_SUCCESS parsed successfully.
* @retval BSL_TLV_ERR_BAD_PARAM Parameter incorrect
* @retval BSL_MEMCPY_FAIL Memory Copy Failure
* @retval BSL_TLV_ERR_NO_WANT_TYPE No TLV found
*/
int32_t BSL_TLV_Parse(uint32_t wantType, const uint8_t *data, uint32_t dataLen, BSL_Tlv *tlv, uint32_t *readLen);
/**
* @ingroup bsl_tlv
* @brief Find the TLV of the specified type
* and calculate the offset from the memory start address to the TLV data.
*
* @param wantType [IN] TLV type
* @param data [IN] TLV message memory
* @param dataLen [IN] Message length
* @param offset [OUT] TLV data offset
* @param length [OUT] Data length
*
* @retval BSL_SUCCESS succeeded.
* @retval BSL_TLV_ERR_BAD_PARAM Parameter incorrect
* @retval BSL_TLV_ERR_NO_WANT_TYPE No TLV found
*/
int32_t BSL_TLV_FindValuePos(uint32_t wantType, const uint8_t *data, uint32_t dataLen,
uint32_t *offset, uint32_t *length);
#ifdef __cplusplus
}
#endif
#endif /* HITLS_BSL_TLV */
#endif // TLV_H
| 2302_82127028/openHiTLS-examples_1508 | bsl/tlv/include/tlv.h | C | unknown | 2,674 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_TLV
#include <stdint.h>
#include "securec.h"
#include "bsl_errno.h"
#include "bsl_bytes.h"
#include "bsl_log_internal.h"
#include "bsl_err_internal.h"
#include "bsl_binlog_id.h"
#include "tlv.h"
/*
 * Serialize a TLV structure into 'buffer' as [type:4][length:4][value:length].
 * On success *usedLen receives the total number of bytes written.
 * Fix: the documented contract returns BSL_TLV_ERR_BAD_PARAM for bad parameters,
 * but the original dereferenced 'tlv' (and later 'usedLen') with no NULL check.
 */
int32_t BSL_TLV_Pack(const BSL_Tlv *tlv, uint8_t *buffer, uint32_t bufLen, uint32_t *usedLen)
{
    if (tlv == NULL || buffer == NULL || usedLen == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_TLV_ERR_BAD_PARAM);
        return BSL_TLV_ERR_BAD_PARAM;
    }
    uint8_t *curPos = buffer;
    /* Reject buffers too small for the header plus the value payload. */
    if ((bufLen < TLV_HEADER_LENGTH) || (tlv->length > bufLen - TLV_HEADER_LENGTH)) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05013, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "TLV build error: bufLen = %u is not enough for tlv length = %u, tlv type = 0x%x.",
            bufLen, tlv->length, tlv->type, 0);
        BSL_ERR_PUSH_ERROR(BSL_TLV_ERR_BAD_PARAM);
        return BSL_TLV_ERR_BAD_PARAM;
    }
    /* Write the TLV type */
    BSL_Uint32ToByte(tlv->type, curPos);
    curPos += sizeof(uint32_t);
    /* Write the TLV length */
    BSL_Uint32ToByte(tlv->length, curPos);
    curPos += sizeof(uint32_t);
    /* Write TLV data */
    if (memcpy_s(curPos, bufLen - TLV_HEADER_LENGTH, tlv->value, tlv->length) != EOK) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05014, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "TLV build error: write tlv value fail, bufLen = %u, tlv length = %u, tlv type = 0x%x.",
            bufLen, tlv->length, tlv->type, 0);
        BSL_ERR_PUSH_ERROR(BSL_MEMCPY_FAIL);
        return BSL_MEMCPY_FAIL;
    }
    *usedLen = TLV_HEADER_LENGTH + tlv->length;
    return BSL_SUCCESS;
}
/*
 * Decode the 8-byte TLV header at 'data' into *type and *length, rejecting a
 * length that exceeds the bytes remaining after the header.
 * Precondition: dataLen >= TLV_HEADER_LENGTH (callers enforce this before
 * invoking; 'dataLen - TLV_HEADER_LENGTH' would otherwise underflow).
 */
static int32_t TLV_ParseHeader(const uint8_t *data, uint32_t dataLen, uint32_t *type, uint32_t *length)
{
    const uint8_t *curPos = data;
    /* Parse the TLV type */
    uint32_t tlvType = BSL_ByteToUint32(curPos);
    curPos += sizeof(uint32_t);
    /* Parse the TLV length */
    uint32_t tlvLen = BSL_ByteToUint32(curPos);
    if (tlvLen > dataLen - TLV_HEADER_LENGTH) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05015, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Check TLV header error: dataLen = %u, tlv length = %u, tlv type = 0x%x.", dataLen, tlvLen, tlvType, 0);
        BSL_ERR_PUSH_ERROR(BSL_TLV_ERR_BAD_PARAM);
        return BSL_TLV_ERR_BAD_PARAM;
    }
    *type = tlvType;
    *length = tlvLen;
    return BSL_SUCCESS;
}
/*
 * Scan the TLV stream in 'data' for the first element of type 'wantType', copy its
 * value into tlv->value (capacity tlv->length), and report via *readLen how many
 * bytes of the stream were consumed up to and including the match.
 * On success tlv->type/tlv->length are overwritten with the matched element's values.
 */
int32_t BSL_TLV_Parse(uint32_t wantType, const uint8_t *data, uint32_t dataLen, BSL_Tlv *tlv, uint32_t *readLen)
{
    int32_t ret;
    const uint8_t *curPos = data;
    uint32_t remainLen = dataLen;
    uint32_t type;
    uint32_t length;
    while (remainLen >= TLV_HEADER_LENGTH) {
        /* Parse the TLV type and length */
        ret = TLV_ParseHeader(curPos, remainLen, &type, &length);
        if (ret != BSL_SUCCESS) {
            BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05016, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
                "Parse TLV error: tlv header illegal.", 0, 0, 0, 0);
            return ret;
        }
        /* Cannot underflow: TLV_ParseHeader guarantees length <= remainLen - header. */
        remainLen -= (TLV_HEADER_LENGTH + length);
        /* The TLV type matches the expected type */
        if (wantType == type) {
            /* Parse the TLV data */
            if (memcpy_s(tlv->value, tlv->length, curPos + TLV_HEADER_LENGTH, length) != EOK) {
                BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05017, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
                    "Parse TLV error: write tlv value fail, bufLen = %u, tlv length = %u, tlv type = 0x%x.",
                    tlv->length, length, type, 0);
                BSL_ERR_PUSH_ERROR(BSL_MEMCPY_FAIL);
                return BSL_MEMCPY_FAIL;
            }
            tlv->type = type;
            tlv->length = length;
            *readLen = dataLen - remainLen;
            return BSL_SUCCESS;
        }
        /* The TLV type does not match the expected type. Continue to parse the next TLV. */
        curPos += (TLV_HEADER_LENGTH + length);
    }
    /* No matched TLV found */
    BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05018, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
        "Parse TLV error: no want type(0x%x), dataLen = %u.", wantType, dataLen, 0, 0);
    BSL_ERR_PUSH_ERROR(BSL_TLV_ERR_NO_WANT_TYPE);
    return BSL_TLV_ERR_NO_WANT_TYPE;
}
/*
 * Locate the first TLV of type 'wantType' and report the byte offset of its value
 * within 'data' (*offset) and the value length (*length), without copying.
 * Fix: the loop condition was 'remainLen > TLV_HEADER_LENGTH', inconsistent with
 * BSL_TLV_Parse's '>='; a zero-length TLV at the very end of the stream
 * (remainLen == TLV_HEADER_LENGTH) could therefore never be found.
 */
int32_t BSL_TLV_FindValuePos(uint32_t wantType, const uint8_t *data, uint32_t dataLen,
    uint32_t *offset, uint32_t *length)
{
    int32_t ret;
    const uint8_t *curPos = data;
    uint32_t remainLen = dataLen;
    uint32_t type;
    while (remainLen >= TLV_HEADER_LENGTH) {
        /* Parse the TLV type and length */
        ret = TLV_ParseHeader(curPos, remainLen, &type, length);
        if (ret != BSL_SUCCESS) {
            BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05019, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
                "Find TLV error: tlv header illegal.", 0, 0, 0, 0);
            return ret;
        }
        /* The TLV type matches the expected type */
        if (wantType == type) {
            *offset = dataLen - remainLen + TLV_HEADER_LENGTH;
            return BSL_SUCCESS;
        }
        /* The TLV type does not match the expected type. Continue to parse the next TLV. */
        curPos += (TLV_HEADER_LENGTH + *length);
        remainLen -= (TLV_HEADER_LENGTH + *length);
    }
    /* No matched TLV found */
    BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05020, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
        "Find TLV error: no want type(0x%x), dataLen = %u.", wantType, dataLen, 0, 0);
    BSL_ERR_PUSH_ERROR(BSL_TLV_ERR_NO_WANT_TYPE);
    return BSL_TLV_ERR_NO_WANT_TYPE;
}
#endif /* HITLS_BSL_TLV */
| 2302_82127028/openHiTLS-examples_1508 | bsl/tlv/src/tlv.c | C | unknown | 5,973 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef UI_TYPE_H
#define UI_TYPE_H
#include "hitls_build.h"
#ifdef HITLS_BSL_UI
#include <stdint.h>
#include "bsl_sal.h"
#include "bsl_ui.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cpluscplus */
struct UI_ControlMethod {
    int32_t (*uiOpen) (BSL_UI *ui); // Open the input and output streams
    int32_t (*uiWrite) (BSL_UI *ui, BSL_UI_DataPack *data); // Write callback
    int32_t (*uiRead) (BSL_UI *ui, BSL_UI_DataPack *data); // Read callback
    int32_t (*uiClose) (BSL_UI *ui); // Close the input and output streams.
};
/* A UI session: method table plus I/O streams, guarded by a thread lock. */
struct UI_Control {
    const BSL_UI_Method *method;    /* operation table (open/write/read/close) */
    BSL_SAL_ThreadLockHandle lock;  /* serializes access to the streams */
    void *in;                       /* input stream handle */
    void *out;                      /* output stream handle */
    void *exData;                   /* user extension data */
};
/* One read or write request passed to the UI method callbacks. */
struct UI_ControlDataPack {
    uint32_t type;     /* BSL_UI_DATA_READ or BSL_UI_DATA_WRITE */
    uint32_t flags;    /* request flags (e.g. echo control) -- semantics defined by bsl_ui.h */
    char *data;        /* write: text to emit; read: caller buffer to fill */
    uint32_t dataLen;  /* write: text length; read: buffer capacity, then result length */
    char *verifyData;  /* optional buffer for verification input */
};
#define BSL_UI_SUPPORT_ABILITY(cap, pos) (((cap) & (pos)) != 0)
#define BSL_UI_READ_BUFF_MAX_LEN 1025 // 1024 + '\0'
#ifdef __cplusplus
}
#endif /* __cpluscplus */
#endif /* HITLS_BSL_UI */
#endif /* UI_TYPE_H */
| 2302_82127028/openHiTLS-examples_1508 | bsl/ui/include/ui_type.h | C | unknown | 1,571 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UI
#include <stdio.h>
#include "securec.h"
#include "bsl_sal.h"
#include "ui_type.h"
#include "bsl_errno.h"
#include "bsl_log_internal.h"
#include "bsl_err_internal.h"
#include "bsl_binlog_id.h"
#include "bsl_ui.h"
#define BSL_UI_PROMPT_PART_MAX_LEN 200
/* Allocate a zeroed UI object; falls back to the default method table when 'method' is NULL. */
BSL_UI *BSL_UI_New(const BSL_UI_Method *method)
{
    BSL_UI *ui = (BSL_UI *)BSL_SAL_Malloc(sizeof(BSL_UI));
    if (ui == NULL) {
        return NULL;
    }
    (void)memset_s(ui, sizeof(BSL_UI), 0, sizeof(BSL_UI));
    int32_t ret = BSL_SAL_ThreadLockNew(&(ui->lock));
    if (ret != BSL_SUCCESS) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05061, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "ui new: new thread lock error ret = %u.", (uint32_t)ret, 0, 0, 0);
        BSL_SAL_FREE(ui);
        return NULL;
    }
    ui->method = (method != NULL) ? method : BSL_UI_GetOperMethod(NULL);
    return ui;
}
/* Release a UI object and its thread lock; NULL is ignored. */
void BSL_UI_Free(BSL_UI *ui)
{
    if (ui != NULL) {
        BSL_SAL_ThreadLockFree(ui->lock);
        BSL_SAL_FREE(ui);
    }
}
/* Allocate a zero-initialized method table; returns NULL on allocation failure. */
BSL_UI_Method *BSL_UI_MethodNew(void)
{
    BSL_UI_Method *tbl = (BSL_UI_Method *)BSL_SAL_Malloc(sizeof(BSL_UI_Method));
    if (tbl != NULL) {
        (void)memset_s(tbl, sizeof(BSL_UI_Method), 0, sizeof(BSL_UI_Method));
    }
    return tbl;
}
/* Release a method table; NULL is ignored. */
void BSL_UI_MethodFree(BSL_UI_Method *method)
{
    if (method != NULL) {
        BSL_SAL_FREE(method);
    }
}
/* Install one callback into the method table, selected by 'type'. */
int32_t BSL_UI_SetMethod(BSL_UI_Method *method, uint8_t type, void *func)
{
    if (method == NULL || func == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (type == BSL_UIM_OPEN) {
        method->uiOpen = func;
    } else if (type == BSL_UIM_WRITE) {
        method->uiWrite = func;
    } else if (type == BSL_UIM_READ) {
        method->uiRead = func;
    } else if (type == BSL_UIM_CLOSE) {
        method->uiClose = func;
    } else {
        BSL_ERR_PUSH_ERROR(BSL_UI_METHOD_INVALID_TYPE);
        return BSL_UI_METHOD_INVALID_TYPE;
    }
    return BSL_SUCCESS;
}
/* Read one callback out of the method table, selected by 'type'. */
int32_t BSL_UI_GetMethod(const BSL_UI_Method *method, uint8_t type, void **func)
{
    if (method == NULL || func == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (type == BSL_UIM_OPEN) {
        *func = method->uiOpen;
    } else if (type == BSL_UIM_WRITE) {
        *func = method->uiWrite;
    } else if (type == BSL_UIM_READ) {
        *func = method->uiRead;
    } else if (type == BSL_UIM_CLOSE) {
        *func = method->uiClose;
    } else {
        BSL_ERR_PUSH_ERROR(BSL_UI_METHOD_INVALID_TYPE);
        return BSL_UI_METHOD_INVALID_TYPE;
    }
    return BSL_SUCCESS;
}
/*
 * Build the prompt string "Please input <objectDesc>[ for <objectName>]:".
 * Returns a heap string the caller must free, or NULL when objectDesc is NULL,
 * either part exceeds BSL_UI_PROMPT_PART_MAX_LEN, or allocation fails.
 */
char *BSL_UI_ConstructPrompt(const char *objectDesc, const char *objectName)
{
    const char prefix[] = "Please input ";
    const char infix[] = " for ";
    const char suffix[] = ":";
    if ((objectDesc == NULL) || (strlen(objectDesc) > BSL_UI_PROMPT_PART_MAX_LEN)) {
        return NULL;
    }
    uint32_t total = (uint32_t)strlen(prefix) + (uint32_t)strlen(objectDesc) + (uint32_t)strlen(suffix) + 1;
    if (objectName != NULL) {
        if (strlen(objectName) > BSL_UI_PROMPT_PART_MAX_LEN) {
            return NULL;
        }
        total += (uint32_t)strlen(infix) + (uint32_t)strlen(objectName);
    }
    char *prompt = (char *)BSL_SAL_Malloc(total);
    if (prompt == NULL) {
        return NULL;
    }
    /* Sizes were computed above, so the bounded copies cannot truncate. */
    (void)strcpy_s(prompt, total, prefix);
    (void)strcat_s(prompt, total, objectDesc);
    if (objectName != NULL) {
        (void)strcat_s(prompt, total, infix);
        (void)strcat_s(prompt, total, objectName);
    }
    (void)strcat_s(prompt, total, suffix);
    return prompt;
}
/* One prompt/response round-trip: write the prompt, then read the reply. */
static int32_t BSL_UI_OperDataOnce(BSL_UI *ui, BSL_UI_DataPack *writeData, BSL_UI_DataPack *readData)
{
    int32_t ret = ui->method->uiWrite(ui, writeData);
    if (ret != BSL_SUCCESS) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05082, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "ui pwd util: write error:%u.", (uint32_t)ret, 0, 0, 0);
        return ret;
    }
    ret = ui->method->uiRead(ui, readData);
    if (ret == BSL_SUCCESS) {
        return BSL_SUCCESS;
    }
    BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05084, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
        "ui pwd util: read error:%u.", (uint32_t)ret, 0, 0, 0);
    return ret;
}
/*
 * Re-prompt the user ("Verify---<prompt>") and compare the second entry with the
 * first. Returns BSL_SUCCESS when both entries match, BSL_UI_VERIFY_BUFF_FAILED
 * on mismatch, or the underlying I/O error. The temporary buffer holding the
 * second entry is zeroized before return.
 * Fix: the original called BSL_ERR_PUSH_ERROR(ret) unconditionally at the end,
 * pushing BSL_SUCCESS onto the error stack on the success path.
 */
static int32_t BSL_UI_OperVerifyData(BSL_UI *ui, const char *promptStr, BSL_UI_DataPack *firstReadData)
{
    BSL_UI_DataPack writeData = {0};
    BSL_UI_DataPack readData = {0};
    char verifyRes[BSL_UI_READ_BUFF_MAX_LEN] = {0};
    uint32_t verifyResLen = BSL_UI_READ_BUFF_MAX_LEN;
    char verifyPrompt[] = "Verify---";
    char verifyFailPrompt[] = "Verify failed!\n";
    uint32_t verifyLen = (uint32_t)strlen(promptStr) + (uint32_t)strlen(verifyPrompt) + 1;
    char *verifyStr = BSL_SAL_Malloc(verifyLen);
    if (verifyStr == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UI_MEM_ALLOC_FAIL);
        return BSL_UI_MEM_ALLOC_FAIL;
    }
    (void)memset_s(verifyStr, verifyLen, 0, verifyLen);
    (void)strcpy_s(verifyStr, verifyLen, verifyPrompt);
    (void)strcat_s(verifyStr, verifyLen, promptStr);
    writeData.data = verifyStr;
    writeData.dataLen = (uint32_t)strlen(verifyStr) + 1;
    readData.data = verifyRes;
    readData.dataLen = verifyResLen;
    int32_t ret = BSL_UI_OperDataOnce(ui, &writeData, &readData);
    if (ret != BSL_SUCCESS) {
        BSL_SAL_FREE(verifyStr);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    /* Both length and content must match the first entry. */
    if (readData.dataLen != firstReadData->dataLen || strcmp(verifyRes, firstReadData->data) != 0) {
        writeData.data = verifyFailPrompt;
        writeData.dataLen = (uint32_t)strlen(verifyFailPrompt) + 1;
        (void)ui->method->uiWrite(ui, &writeData);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05069, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "ui pwd util: verify failed.", 0, 0, 0, 0);
        ret = BSL_UI_VERIFY_BUFF_FAILED;
    }
    BSL_SAL_FREE(verifyStr);
    /* Zeroize the second password entry before leaving. */
    (void)memset_s(verifyRes, sizeof(verifyRes), 0, sizeof(verifyRes));
    if (ret != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
    }
    return ret;
}
/*
 * Prompt for and read one input value, optionally validating it through the
 * caller's callback. On success *prompt receives the heap prompt string, which
 * the caller must free.
 */
static int32_t BSL_UI_OperInputData(BSL_UI *ui, BSL_UI_ReadPwdParam *param, BSL_UI_DataPack *readData,
    char **prompt, const BSL_UI_CheckDataCallBack checkDataCallBack, void *callBackData)
{
    BSL_UI_DataPack writeData = {0};
    char *promptStr = BSL_UI_ConstructPrompt(param->desc, param->name);
    if (promptStr == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UI_CONSTRUCT_PROMPT_ERROR);
        return BSL_UI_CONSTRUCT_PROMPT_ERROR;
    }
    writeData.data = promptStr;
    writeData.dataLen = (uint32_t)strlen(promptStr) + 1;
    int32_t ret = BSL_UI_OperDataOnce(ui, &writeData, readData);
    if (ret == BSL_SUCCESS && checkDataCallBack != NULL) {
        ret = checkDataCallBack(ui, readData->data, readData->dataLen, callBackData);
        if (ret != BSL_SUCCESS) {
            BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05086, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
                "ui pwd util: callback check data failed:%u.", (uint32_t)ret, 0, 0, 0);
        }
    }
    if (ret != BSL_SUCCESS) {
        BSL_SAL_FREE(promptStr);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    *prompt = promptStr;
    return BSL_SUCCESS;
}
/*
 * Interactively read a password into 'buff' (capacity *buffLen, updated to the
 * result length including the terminator). Optionally validates the entry via
 * checkDataCallBack and, when param->verify is set, asks the user to re-enter it.
 * The stack copy of the password is zeroized before return.
 */
int32_t BSL_UI_ReadPwdUtil(BSL_UI_ReadPwdParam *param, char *buff, uint32_t *buffLen,
    const BSL_UI_CheckDataCallBack checkDataCallBack, void *callBackData)
{
    char result[BSL_UI_READ_BUFF_MAX_LEN] = {0};
    char *promptStr = NULL;
    if (param == NULL || buff == NULL || buffLen == NULL || *buffLen == 0) {
        return BSL_NULL_INPUT;
    }
    /* Default (NULL) method table: built-in terminal I/O. */
    BSL_UI *ui = BSL_UI_New(NULL);
    if (ui == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UI_CREATE_OBJECT_ERROR);
        return BSL_UI_CREATE_OBJECT_ERROR;
    }
    int32_t ret = ui->method->uiOpen(ui);
    if (ret != BSL_SUCCESS) {
        BSL_UI_Free(ui);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05083, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "ui pwd util: open error:%u.", (uint32_t)ret, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    /* do/while(0) gives a single cleanup exit for close/free/zeroize below. */
    do {
        BSL_UI_DataPack readData = {0, 0, result, BSL_UI_READ_BUFF_MAX_LEN, NULL};
        ret = BSL_UI_OperInputData(ui, param, &readData, &promptStr, checkDataCallBack, callBackData);
        if (ret != BSL_SUCCESS) {
            break;
        }
        if (*buffLen < readData.dataLen) {
            ret = BSL_UI_OUTPUT_BUFF_TOO_SHORT;
            BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05066, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
                "ui pwd util: buff len is too short.", 0, 0, 0, 0);
            break;
        }
        if (param->verify) {
            /* Ask the user to type the password a second time and compare. */
            ret = BSL_UI_OperVerifyData(ui, promptStr, &readData);
            if (ret != BSL_SUCCESS) {
                break;
            }
        }
        if (strcpy_s(buff, *buffLen, result) != EOK) {
            ret = BSL_UI_OUTPUT_BUFF_TOO_SHORT;
            break;
        }
        *buffLen = (uint32_t)strlen(buff) + 1;
    } while (0);
    ui->method->uiClose(ui);
    BSL_UI_Free(ui);
    BSL_SAL_FREE(promptStr);
    /* Wipe the on-stack password copy. */
    (void)memset_s(result, sizeof(result), 0, sizeof(result));
    return ret;
}
/* Populate a data pack as a read request from a BSL_UI_CtrlRGetParam argument. */
static int32_t BSL_UI_DataReadProcess(BSL_UI_DataPack *data, void *parg, uint32_t larg)
{
    if (parg == NULL || larg != sizeof(BSL_UI_CtrlRGetParam)) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05062, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "ui data process: read param larg error.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_UI_INVALID_DATA_ARG);
        return BSL_UI_INVALID_DATA_ARG;
    }
    const BSL_UI_CtrlRGetParam *req = (BSL_UI_CtrlRGetParam *)parg;
    data->type = BSL_UI_DATA_READ;
    data->flags = req->flags;
    data->data = req->buff;
    data->dataLen = req->buffLen;
    data->verifyData = req->verifyBuff;
    return BSL_SUCCESS;
}
/* Populate a data pack as a write request carrying the raw buffer and length. */
static int32_t BSL_UI_DataWriteProcess(BSL_UI_DataPack *data, void *parg, uint32_t larg)
{
    data->type = BSL_UI_DATA_WRITE;
    data->data = parg;
    data->dataLen = larg;
    return BSL_SUCCESS;
}
/* Allocate a zero-initialized data pack; returns NULL on allocation failure. */
BSL_UI_DataPack *BSL_UI_DataPackNew(void)
{
    BSL_UI_DataPack *pack = (BSL_UI_DataPack *)BSL_SAL_Malloc(sizeof(BSL_UI_DataPack));
    if (pack != NULL) {
        (void)memset_s(pack, sizeof(BSL_UI_DataPack), 0, sizeof(BSL_UI_DataPack));
    }
    return pack;
}
/* Release a data pack; NULL is ignored. */
void BSL_UI_DataPackFree(BSL_UI_DataPack *data)
{
    if (data != NULL) {
        BSL_SAL_FREE(data);
    }
}
/* Dispatch a data-pack configuration request by type (read or write). */
int32_t BSL_UI_DataCtrl(BSL_UI_DataPack *data, uint32_t type, void *parg, uint32_t larg)
{
    if (data == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (type == BSL_UI_DATA_READ) {
        return BSL_UI_DataReadProcess(data, parg, larg);
    }
    if (type == BSL_UI_DATA_WRITE) {
        return BSL_UI_DataWriteProcess(data, parg, larg);
    }
    return BSL_UI_INVALID_DATA_TYPE;
}
/* Expose the buffer and length held by a data pack; fails when no buffer is attached. */
int32_t BSL_UI_GetDataResult(BSL_UI_DataPack *data, char **result, uint32_t *resultLen)
{
    if (data == NULL || result == NULL || resultLen == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (data->data == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UI_INVALID_DATA_RESULT);
        return BSL_UI_INVALID_DATA_RESULT;
    }
    *result = data->data;
    *resultLen = data->dataLen;
    return BSL_SUCCESS;
}
#endif /* HITLS_BSL_UI */
| 2302_82127028/openHiTLS-examples_1508 | bsl/ui/src/ui_core.c | C | unknown | 12,382 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UI
#include <stdio.h>
#include <termios.h>
#include "securec.h"
#include "bsl_sal.h"
#include "sal_file.h"
#include "ui_type.h"
#include "bsl_errno.h"
#include "bsl_err_internal.h"
#include "bsl_ui.h"
#define DEV_TTY "/dev/tty"
/* Open the controlling terminal for reading and writing.
 * On success the UI lock stays held; it is released in UI_Close.
 * On failure both handles are left NULL and the lock is released. */
int32_t UI_Open(BSL_UI *ui)
{
    if (ui == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    (void)BSL_SAL_ThreadWriteLock(ui->lock);
    int32_t ret = BSL_SAL_FileOpen(&(ui->in), DEV_TTY, "r");
    if (ret != BSL_SUCCESS) {
        (void)BSL_SAL_ThreadUnlock(ui->lock);
        return ret;
    }
    ret = BSL_SAL_FileOpen(&(ui->out), DEV_TTY, "w");
    if (ret != BSL_SUCCESS) {
        BSL_SAL_FileClose(ui->in);
        /* Clear the handle so a subsequent UI_Close cannot double-close it. */
        ui->in = NULL;
        (void)BSL_SAL_ThreadUnlock(ui->lock);
        return ret;
    }
    return BSL_SUCCESS;
}
/* Validate the arguments shared by the read and write paths. */
static int32_t UI_CheckDataCommonParam(BSL_UI *ui, BSL_UI_DataPack *data)
{
    bool invalid = (ui == NULL) || (data == NULL) || (data->data == NULL) || (data->dataLen == 0);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    return BSL_SUCCESS;
}
/* Validate write arguments: common checks plus an open output stream. */
static int32_t UI_CheckDataWriteParam(BSL_UI *ui, BSL_UI_DataPack *data)
{
    int32_t ret = UI_CheckDataCommonParam(ui, data);
    if (ret == BSL_SUCCESS && ui->out == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        ret = BSL_NULL_INPUT;
    }
    return ret;
}
/* Write the packaged string to the terminal output stream and flush it. */
int32_t UI_Write(BSL_UI *ui, BSL_UI_DataPack *data)
{
    int32_t ret = UI_CheckDataWriteParam(ui, data);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    if (!SAL_FPuts(ui->out, data->data)) {
        BSL_ERR_PUSH_ERROR(BSL_UI_WRITE_ERROR);
        return BSL_UI_WRITE_ERROR;
    }
    (void)SAL_Flush(ui->out);
    return BSL_SUCCESS;
}
/* Read one line from the UI input stream into result (buffer size resultLen).
 * Fails when the stream has ended (e.g. Ctrl+D), a file operation error is
 * pending, or the typed line (including '\n') did not fit in the buffer. */
static int32_t UI_ReadInternal(BSL_UI *ui, char *result, int32_t resultLen)
{
    if (SAL_FGets(ui->in, result, resultLen) == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UI_FGETS_ERROR);
        return BSL_UI_FGETS_ERROR;
    }
    int32_t ret = SAL_Feof(ui->in);
    if (ret == BSL_SUCCESS || ret == BSL_SAL_FILE_NO_REG_FUNC) {
        // The input stream will be closed when Ctrl+D is pressed, or func is not regeister.
        BSL_ERR_PUSH_ERROR(BSL_UI_STDIN_END_ERROR);
        return BSL_UI_STDIN_END_ERROR;
    }
    if (SAL_FileError(ui->in)) { // Previous file operation error
        BSL_ERR_PUSH_ERROR(BSL_UI_OPERATION_ERROR);
        return BSL_UI_OPERATION_ERROR;
    }
    // A full buffer whose last stored character (index resultLen - 2, the one
    // before the terminating '\0') is not '\n' means the input line was too long.
    if ((strlen(result) == (size_t)resultLen - 1) && (result[resultLen - 2] != '\n')) {
        BSL_ERR_PUSH_ERROR(BSL_UI_READ_BUFF_TOO_LONG);
        return BSL_UI_READ_BUFF_TOO_LONG;
    }
    return BSL_SUCCESS;
}
/* Disable terminal echo when BSL_UI_DATA_FLAG_ECHO is not requested. */
static int32_t UI_ReadSetFlag(BSL_UI *ui, uint32_t flags, struct termios *origTerm)
{
    if (BSL_UI_SUPPORT_ABILITY(flags, BSL_UI_DATA_FLAG_ECHO)) {
        return BSL_SUCCESS; /* echo stays on; nothing to change */
    }
    struct termios noEcho;
    (void)memcpy_s(&noEcho, sizeof(noEcho), origTerm, sizeof(struct termios));
    noEcho.c_lflag &= ~ECHO;
    return SAL_FSetAttr(ui->in, TCSANOW, (void *)&noEcho);
}
/* Restore the saved terminal attributes and emit a newline when echo was off. */
static int32_t UI_ReadRecoverFlag(BSL_UI *ui, uint32_t flags, struct termios *origTerm)
{
    if (BSL_UI_SUPPORT_ABILITY(flags, BSL_UI_DATA_FLAG_ECHO)) {
        return BSL_SUCCESS;
    }
    int32_t ret = SAL_FSetAttr(ui->in, TCSANOW, (void *)origTerm);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    char endStr[] = "\n";
    BSL_UI_DataPack endData = {0};
    endData.data = endStr;
    endData.dataLen = (uint32_t)strlen(endStr) + 1;
    return UI_Write(ui, &endData);
}
/* Validate read arguments: common checks plus an open input stream. */
static int32_t UI_CheckDataReadParam(BSL_UI *ui, BSL_UI_DataPack *data)
{
    int32_t ret = UI_CheckDataCommonParam(ui, data);
    if (ret == BSL_SUCCESS && ui->in == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        ret = BSL_NULL_INPUT;
    }
    return ret;
}
/* Read one line of user input into data->data, optionally with echo disabled
 * and optionally verified against data->verifyData. Terminal attributes are
 * always restored before the result is interpreted; the stack buffer holding
 * the typed line is wiped before returning. */
int32_t UI_Read(BSL_UI *ui, BSL_UI_DataPack *data)
{
    struct termios origTerm;
    char result[BSL_UI_READ_BUFF_MAX_LEN + 1]; // real buff + '\n' + '\0'
    int32_t ret = UI_CheckDataReadParam(ui, data);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    // Save the terminal state so it can be restored even on error paths.
    ret = SAL_FGetAttr(ui->in, (void *)&origTerm);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    ret = UI_ReadSetFlag(ui, data->flags, &origTerm);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    do {
        ret = UI_ReadInternal(ui, result, (int32_t)sizeof(result));
        if (ret != BSL_SUCCESS) {
            // Best-effort restore; the read error takes precedence.
            (void)UI_ReadRecoverFlag(ui, data->flags, &origTerm);
            break;
        }
        ret = UI_ReadRecoverFlag(ui, data->flags, &origTerm);
        if (ret != BSL_SUCCESS) {
            break;
        }
        char *pos = strchr(result, '\n');
        if (pos != NULL) {
            *pos = '\0'; // strip the trailing newline
        }
        if (strlen(result) == 0) {
            ret = BSL_UI_READ_LEN_TOO_SHORT;
            break;
        }
        if (data->dataLen < (strlen(result) + 1)) {
            ret = BSL_UI_OUTPUT_BUFF_TOO_SHORT;
            break;
        }
        // Optional confirmation: input must match the previously captured value.
        if (data->verifyData != NULL && strcmp(data->verifyData, result) != 0) {
            ret = BSL_UI_VERIFY_BUFF_FAILED;
            break;
        }
        (void)strcpy_s(data->data, data->dataLen, result);
        data->dataLen = (uint32_t)strlen(result) + 1;
    } while (0);
    // Wipe the typed line (may contain a password) from the stack.
    (void)memset_s(result, sizeof(result), 0, sizeof(result));
    return ret;
}
/* Close both terminal streams and release the lock taken in UI_Open. */
int32_t UI_Close(BSL_UI *ui)
{
    if (ui != NULL) {
        if (ui->in != NULL) {
            BSL_SAL_FileClose(ui->in);
        }
        if (ui->out != NULL) {
            BSL_SAL_FileClose(ui->out);
        }
        (void)BSL_SAL_ThreadUnlock(ui->lock);
    }
    return BSL_SUCCESS;
}
/* Default terminal-backed UI method table. */
static BSL_UI_Method g_defaultUiMethod = {UI_Open, UI_Write, UI_Read, UI_Close};
/* Return a UI's method table, or the default table when ui is NULL. */
const BSL_UI_Method *BSL_UI_GetOperMethod(const BSL_UI *ui)
{
    return (ui != NULL) ? ui->method : &g_defaultUiMethod;
}
#endif /* HITLS_BSL_UI */
| 2302_82127028/openHiTLS-examples_1508 | bsl/ui/src/ui_default_impl.c | C | unknown | 6,624 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef UIO_BASE_H
#define UIO_BASE_H
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_PLT
#include "bsl_uio.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Callback table describing one UIO transport implementation. */
struct BSL_UIO_MethodStruct {
    int32_t uioType;            /* transport type tag of this method set */
    BslUioWriteCb uioWrite;     /* write callback */
    BslUioReadCb uioRead;       /* read callback */
    BslUioCtrlCb uioCtrl;       /* control (ioctl-style) callback */
    BslUioPutsCb uioPuts;       /* string write callback */
    BslUioGetsCb uioGets;       /* line read callback */
    BslUioCreateCb uioCreate;   /* constructor hook, invoked by BSL_UIO_New */
    BslUioDestroyCb uioDestroy; /* destructor hook, invoked by BSL_UIO_Free */
};
/**
* @ingroup bsl_uio
*
* @brief Get the fd of the UIO object
* @param uio [IN] UIO object
* @retval File Descriptor fd
*/
int32_t BSL_UIO_GetFd(BSL_UIO *uio);
#ifdef __cplusplus
}
#endif
#endif /* HITLS_BSL_UIO_PLT */
#endif // UIO_BASE_H
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/include/uio_base.h | C | unknown | 1,201 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_PLT
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_binlog_id.h"
#include "bsl_log_internal.h"
#include "bsl_log.h"
#include "bsl_err_internal.h"
#include "bsl_errno.h"
#include "bsl_uio.h"
#include "uio_base.h"
#include "uio_abstraction.h"
/* Allocate a zeroed UIO method table; returns NULL on allocation failure. */
BSL_UIO_Method *BSL_UIO_NewMethod(void)
{
    BSL_UIO_Method *method = (BSL_UIO_Method *)BSL_SAL_Calloc(1u, sizeof(BSL_UIO_Method));
    if (method != NULL) {
        return method;
    }
    BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05058, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
        "new method is NULL.", NULL, NULL, NULL, NULL);
    BSL_ERR_PUSH_ERROR(BSL_MALLOC_FAIL);
    return NULL;
}
/* Release a method table created by BSL_UIO_NewMethod. */
void BSL_UIO_FreeMethod(BSL_UIO_Method *meth)
{
    BSL_SAL_FREE(meth);
}
/* Record the transport type tag in a method table. */
int32_t BSL_UIO_SetMethodType(BSL_UIO_Method *meth, int32_t type)
{
    if (meth != NULL) {
        meth->uioType = type;
        return BSL_SUCCESS;
    }
    BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
    BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05059, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
        "set method type is NULL.", NULL, NULL, NULL, NULL);
    return BSL_NULL_INPUT;
}
/* Install one callback into the method table, selected by the type tag. */
int32_t BSL_UIO_SetMethod(BSL_UIO_Method *meth, int32_t type, void *func)
{
    if (meth == NULL || func == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05060, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "set method is NULL.", NULL, NULL, NULL, NULL);
        return BSL_NULL_INPUT;
    }
    switch (type) {
        case BSL_UIO_CREATE_CB:
            meth->uioCreate = func;
            break;
        case BSL_UIO_DESTROY_CB:
            meth->uioDestroy = func;
            break;
        case BSL_UIO_WRITE_CB:
            meth->uioWrite = func;
            break;
        case BSL_UIO_READ_CB:
            meth->uioRead = func;
            break;
        case BSL_UIO_CTRL_CB:
            meth->uioCtrl = func;
            break;
        case BSL_UIO_PUTS_CB:
            meth->uioPuts = func;
            break;
        case BSL_UIO_GETS_CB:
            meth->uioGets = func;
            break;
        default:
            BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05025, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
                "method type is wrong.", NULL, NULL, NULL, NULL);
            BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
            return BSL_INVALID_ARG;
    }
    return BSL_SUCCESS;
}
/* Create a UIO object bound to a copy of the given method table.
 * Runs the method's create hook (if any); returns NULL on any failure. */
BSL_UIO *BSL_UIO_New(const BSL_UIO_Method *method)
{
    if (method == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05021, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "method is NULL.", 0, 0, 0, 0);
        return NULL;
    }
    BSL_UIO *newUio = (BSL_UIO *)BSL_SAL_Calloc(1, sizeof(struct UIO_ControlBlock));
    if (newUio == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_MALLOC_FAIL);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05022, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "uio malloc fail.", 0, 0, 0, 0);
        return NULL;
    }
    (void)memcpy_s(&newUio->method, sizeof(BSL_UIO_Method), method, sizeof(BSL_UIO_Method));
    BSL_SAL_ReferencesInit(&(newUio->references));
    BSL_UIO_SetIsUnderlyingClosedByUio(newUio, false);
    if (newUio->method.uioCreate == NULL) {
        return newUio;
    }
    if (newUio->method.uioCreate(newUio) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05023, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "uio create data fail.", 0, 0, 0, 0);
        BSL_SAL_FREE(newUio);
        return NULL;
    }
    return newUio;
}
/* Take an additional reference; fails on NULL or a saturated counter. */
int32_t BSL_UIO_UpRef(BSL_UIO *uio)
{
    if (uio == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05024, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "uio is NULL.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION;
    }
    if (uio->references.count == INT32_MAX) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_REF_MAX); /* counter would overflow */
        return BSL_UIO_REF_MAX;
    }
    int refAfter = 0;
    BSL_SAL_AtomicUpReferences(&(uio->references), &refAfter);
    return BSL_SUCCESS;
}
/* Drop one reference to the UIO. The final reference releases the user data
 * (via the registered free callback), runs the method's destroy hook, tears
 * down the reference counter and frees the object. */
void BSL_UIO_Free(BSL_UIO *uio)
{
    if (uio == NULL) {
        return;
    }
    int ret = 0;
    BSL_SAL_AtomicDownReferences(&(uio->references), &ret);
    if (ret > 0) {
        return; // other holders remain; nothing to release yet
    }
    if (uio->userData != NULL && uio->userDataFreeFunc != NULL) {
        (void)uio->userDataFreeFunc(uio->userData);
        uio->userData = NULL;
    }
    // Destroy hook runs after user data so implementations see a clean object.
    if (uio->method.uioDestroy != NULL) {
        (void)uio->method.uioDestroy(uio);
    }
    BSL_SAL_ReferencesFree(&(uio->references));
    BSL_SAL_FREE(uio);
    return;
}
/* Forward a write to the method callback and account the bytes written. */
int32_t BSL_UIO_Write(BSL_UIO *uio, const void *data, uint32_t len, uint32_t *writeLen)
{
    bool invalid = (uio == NULL) || (uio->method.uioWrite == NULL) || (data == NULL) ||
        (writeLen == NULL) || (len == 0);
    if (invalid) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05026, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "uio write: internal input error.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION; // if the uio is null, the send size is zero, means no data send;
    }
    if (uio->init != 1) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_UNINITIALIZED);
        return BSL_UIO_UNINITIALIZED;
    }
    int32_t ret = uio->method.uioWrite(uio, data, len, writeLen);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    uio->writeNum += (int64_t)*writeLen; /* account only successful writes */
    return BSL_SUCCESS;
}
/* Forward a string write to the puts callback and account the bytes written. */
int32_t BSL_UIO_Puts(BSL_UIO *uio, const char *buf, uint32_t *writeLen)
{
    bool invalid = (uio == NULL) || (uio->method.uioPuts == NULL) || (writeLen == NULL) || (buf == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION;
    }
    if (uio->init != 1) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_UNINITIALIZED);
        return BSL_UIO_UNINITIALIZED;
    }
    int32_t ret = uio->method.uioPuts(uio, buf, writeLen);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    uio->writeNum += (int64_t)*writeLen;
    return BSL_SUCCESS;
}
/* Forward a line read to the gets callback and account the bytes read. */
int32_t BSL_UIO_Gets(BSL_UIO *uio, char *buf, uint32_t *readLen)
{
    bool invalid = (uio == NULL) || (uio->method.uioGets == NULL) || (readLen == NULL) || (buf == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION;
    }
    if (uio->init != 1) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_UNINITIALIZED);
        return BSL_UIO_UNINITIALIZED;
    }
    int32_t ret = uio->method.uioGets(uio, buf, readLen);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    uio->readNum += (int64_t)*readLen;
    return BSL_SUCCESS;
}
/* Forward a read to the method callback and account the bytes read. */
int32_t BSL_UIO_Read(BSL_UIO *uio, void *data, uint32_t len, uint32_t *readLen)
{
    bool invalid = (uio == NULL) || (uio->method.uioRead == NULL) || (data == NULL) ||
        (len == 0) || (readLen == NULL);
    if (invalid) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05027, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "uio read: internal input error.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION;
    }
    if (uio->init != 1) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_UNINITIALIZED);
        return BSL_UIO_UNINITIALIZED;
    }
    int32_t ret = uio->method.uioRead(uio, data, len, readLen);
    if (ret != BSL_SUCCESS) {
        return ret;
    }
    uio->readNum += (int64_t)*readLen;
    return BSL_SUCCESS;
}
/* Report the transport type tag stored in a UIO's method table. */
int32_t BSL_UIO_GetTransportType(const BSL_UIO *uio)
{
    if (uio == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05028, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "uio is NULL.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    return uio->method.uioType;
}
/* Walk the chain and tell whether any UIO matches the given transport type. */
bool BSL_UIO_GetUioChainTransportType(BSL_UIO *uio, const BSL_UIO_TransportType uioType)
{
    if (uio == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05069, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "get uio type is NULL.", NULL, NULL, NULL, NULL);
        return false;
    }
    for (BSL_UIO *cur = uio; cur != NULL; cur = BSL_UIO_Next(cur)) {
        if (BSL_UIO_GetTransportType(cur) == (int32_t)uioType) {
            return true;
        }
    }
    return false;
}
/* Attach opaque user data to the UIO. */
int32_t BSL_UIO_SetUserData(BSL_UIO *uio, void *data)
{
    if (uio == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05029, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN, "uio is NULL.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    uio->userData = data;
    return BSL_SUCCESS;
}
/* Register the callback that releases the user data when the UIO is freed. */
int32_t BSL_UIO_SetUserDataFreeFunc(BSL_UIO *uio, BSL_UIO_USERDATA_FREE_FUNC userDataFreeFunc)
{
    if (uio == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    uio->userDataFreeFunc = userDataFreeFunc;
    return BSL_SUCCESS;
}
/* Fetch the opaque user data; NULL when uio is NULL or nothing is set. */
void *BSL_UIO_GetUserData(const BSL_UIO *uio)
{
    return (uio == NULL) ? NULL : uio->userData;
}
/* Whether freeing the UIO also closes the underlying resource.
 * NULL yields false, so nothing is released by accident. */
bool BSL_UIO_GetIsUnderlyingClosedByUio(const BSL_UIO *uio)
{
    return (uio == NULL) ? false : uio->isUnderlyingClosedByUio;
}
/* Control whether the UIO owns (and will close) the underlying resource. */
void BSL_UIO_SetIsUnderlyingClosedByUio(BSL_UIO *uio, bool close)
{
    if (uio == NULL) {
        return;
    }
    uio->isUnderlyingClosedByUio = close;
}
/* Expose the UIO's method table; NULL when uio is NULL. */
const BSL_UIO_Method *BSL_UIO_GetMethod(const BSL_UIO *uio)
{
    return (uio == NULL) ? NULL : &uio->method;
}
/* BSL_UIO_GET_INIT handler: copy the init flag into *parg (bool). */
static int32_t UIO_GetInit(BSL_UIO *uio, int32_t larg, bool *parg)
{
    if (parg == NULL || larg != (int32_t)sizeof(bool)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    *parg = uio->init;
    return BSL_SUCCESS;
}
/* BSL_UIO_GET_READ_NUM handler: copy the read counter into *parg (int64_t). */
static int32_t UIO_GetReadNum(BSL_UIO *uio, int32_t larg, int64_t *parg)
{
    if (parg == NULL || larg != (int32_t)sizeof(int64_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    *parg = uio->readNum;
    return BSL_SUCCESS;
}
/* BSL_UIO_GET_WRITE_NUM handler: copy the write counter into *parg (int64_t). */
static int32_t UIO_GetWriteNum(BSL_UIO *uio, int32_t larg, int64_t *parg)
{
    if (parg == NULL || larg != (int32_t)sizeof(int64_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    *parg = uio->writeNum;
    return BSL_SUCCESS;
}
/* Generic control entry: a few queries are answered here, everything else
 * is forwarded to the method's ctrl callback. */
int32_t BSL_UIO_Ctrl(BSL_UIO *uio, int32_t cmd, int32_t larg, void *parg)
{
    if (uio == NULL || uio->method.uioCtrl == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (cmd == BSL_UIO_GET_INIT) {
        return UIO_GetInit(uio, larg, parg);
    }
    if (cmd == BSL_UIO_GET_READ_NUM) {
        return UIO_GetReadNum(uio, larg, parg);
    }
    if (cmd == BSL_UIO_GET_WRITE_NUM) {
        return UIO_GetWriteNum(uio, larg, parg);
    }
    return uio->method.uioCtrl(uio, cmd, larg, parg);
}
/* Fetch the implementation context; NULL when uio is NULL. */
void *BSL_UIO_GetCtx(const BSL_UIO *uio)
{
    return (uio == NULL) ? NULL : uio->ctx;
}
/* Install the implementation context; NULL uio is ignored. */
void BSL_UIO_SetCtx(BSL_UIO *uio, void *ctx)
{
    if (uio == NULL) {
        return;
    }
    uio->ctx = ctx;
}
/* Query the file descriptor via the ctrl path; -1 when unavailable. */
int32_t BSL_UIO_GetFd(BSL_UIO *uio)
{
    int32_t fd = -1;
    /* Parameter validation is delegated to the ctrl handler. */
    (void)BSL_UIO_Ctrl(uio, BSL_UIO_GET_FD, (int32_t)sizeof(fd), &fd);
    return fd;
}
/* Install a file descriptor via the ctrl path; invalid arguments are ignored. */
void BSL_UIO_SetFD(BSL_UIO *uio, int fd)
{
    if (uio == NULL || fd < 0) {
        return;
    }
    BSL_UIO_Ctrl(uio, BSL_UIO_SET_FD, (int32_t)sizeof(fd), &fd);
}
/* OR additional state flags into the UIO; only known flag bits are accepted. */
int32_t BSL_UIO_SetFlags(BSL_UIO *uio, uint32_t flags)
{
    if (uio == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    const uint32_t validFlags =
        BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY | BSL_UIO_FLAGS_BASE64_NO_NEWLINE | BSL_UIO_FLAGS_BASE64_PEM;
    if ((flags & validFlags) == 0 || flags > validFlags) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    uio->flags |= flags;
    return BSL_SUCCESS;
}
/* Clear the given flag bits. */
int32_t BSL_UIO_ClearFlags(BSL_UIO *uio, uint32_t flags)
{
    if (uio == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    uio->flags &= ~flags;
    return BSL_SUCCESS;
}
/* Report which of the given flag bits are currently set (via *out). */
uint32_t BSL_UIO_TestFlags(const BSL_UIO *uio, uint32_t flags, uint32_t *out)
{
    if (uio == NULL || out == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    *out = uio->flags & flags;
    return BSL_SUCCESS;
}
/* Mark the UIO as (un)initialized. */
void BSL_UIO_SetInit(BSL_UIO *uio, bool init)
{
    if (uio == NULL) {
        return;
    }
    uio->init = init;
}
/**
 * @brief Determine whether an errno value is non-fatal, i.e. the I/O
 *        operation may be retried (e.g. EINTR, EAGAIN, EINPROGRESS).
 *
 * @param err [IN] errno value from a failed I/O operation.
 *
 * @return true : err is transient and the operation may be retried.
 *         false: err is a fatal error.
 */
bool UioIsNonFatalErr(int32_t err)
{
    bool ret = true;
    /* Every listed errno is retryable; anything else is treated as fatal. */
    switch (err) {
#if defined(ENOTCONN)
        case ENOTCONN:
#endif
#ifdef EINTR
        case EINTR:
#endif
#ifdef EINPROGRESS
        case EINPROGRESS:
#endif
#ifdef EWOULDBLOCK
#if !defined(WSAEWOULDBLOCK) || WSAEWOULDBLOCK != EWOULDBLOCK
        case EWOULDBLOCK:
#endif
#endif
#ifdef EAGAIN
#if EWOULDBLOCK != EAGAIN
        case EAGAIN:
#endif
#endif
#ifdef EALREADY
        case EALREADY:
#endif
#ifdef EPROTO
        case EPROTO:
#endif
#ifdef EMSGSIZE
        case EMSGSIZE:
#endif
            ret = true;
            break;
        default:
            ret = false;
            break;
    }
    return ret;
}
/* Link the tail UIO (or chain) behind the last element of uio's chain. */
int32_t BSL_UIO_Append(BSL_UIO *uio, BSL_UIO *tail)
{
    if (uio == NULL || tail == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    BSL_UIO *last = uio;
    while (last->next != NULL) {
        last = last->next;
    }
    last->next = tail;
    tail->prev = last;
    return BSL_SUCCESS;
}
/* Detach uio from its chain and return the UIO that followed it (may be NULL). */
BSL_UIO *BSL_UIO_PopCurrent(BSL_UIO *uio)
{
    if (uio == NULL) {
        return NULL;
    }
    BSL_UIO *follower = uio->next;
    BSL_UIO *leader = uio->prev;
    if (leader != NULL) {
        leader->next = follower;
    }
    if (follower != NULL) {
        follower->prev = leader;
    }
    uio->prev = NULL;
    uio->next = NULL;
    return follower;
}
/* Release one reference on every UIO in the chain, starting at uio.
 * If a UIO was still referenced elsewhere (count > 1 before the drop),
 * the walk stops: the rest of the chain is owned by that other holder. */
void BSL_UIO_FreeChain(BSL_UIO *uio)
{
    BSL_UIO *b = uio;
    while (b != NULL) {
        int ref = b->references.count; // sample the count before the drop
        BSL_UIO *next = b->next;       // b may be freed by BSL_UIO_Free below
        BSL_UIO_Free(b);
        if (ref > 1) {
            break;
        }
        b = next;
    }
}
/* Successor of uio in its chain; NULL when uio is NULL or last. */
BSL_UIO *BSL_UIO_Next(BSL_UIO *uio)
{
    return (uio == NULL) ? NULL : uio->next;
}
#endif /* HITLS_BSL_UIO_PLT */
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/src/uio_abstraction.c | C | unknown | 14,941 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef UIO_ABSTRACTION_H
#define UIO_ABSTRACTION_H
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_PLT
#include "bsl_uio.h"
#include "uio_base.h"
#include "sal_atomic.h"
#ifdef __cplusplus
extern "C" {
#endif
#define IP_ADDR_V4_LEN 4
#define IP_ADDR_V6_LEN 16
#define IP_ADDR_MAX_LEN IP_ADDR_V6_LEN
#define SOCK_ADDR_V4_LEN (sizeof(struct sockaddr_in))
#define SOCK_ADDR_V6_LEN (sizeof(struct sockaddr_in6))
#define SOCK_ADDR_UNIX_LEN (sizeof(struct sockaddr_un))
#define DGRAM_SOCKADDR_MAX_LEN SOCK_ADDR_UNIX_LEN
struct UIO_ControlBlock {
struct BSL_UIO_MethodStruct method;
uint32_t flags; // Read/write retry flag. For details, see BSL_UIO_FLAGS_* in bsl_uio.h
bool init; // Initialization flag. 1 means it's initialized, and 0 means it's not initialized.
int64_t writeNum; // count of write
int64_t readNum; // count of read
void *ctx; // Context
uint32_t ctxLen; // Context length
void *userData; // User data
BSL_UIO_USERDATA_FREE_FUNC userDataFreeFunc; // Release User Data
struct UIO_ControlBlock *prev; // Previous UIO object of the current UIO object in the UIO chain
struct UIO_ControlBlock *next; // Next UIO object of the current UIO object in the UIO chain
bool isUnderlyingClosedByUio; // Indicates whether related resources are released together with the UIO.
BSL_SAL_RefCount references; // reference count
};
typedef struct {
uint8_t *data;
uint64_t size;
} BSL_UIO_CtrlGetInfoParam;
/**
 * @brief Check whether a given error code is non-fatal (retryable).
 *
 * @param err [IN] Error code.
 *
 * @return true: The error is transient and the I/O may be retried.
 *         false: The error is fatal.
 */
bool UioIsNonFatalErr(int32_t err);
#ifdef __cplusplus
}
#endif
#endif /* HITLS_BSL_UIO_PLT */
#endif // UIO_ABSTRACTION_H
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/src/uio_abstraction.h | C | unknown | 2,430 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_BUFFER
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_errno.h"
#include "bsl_err_internal.h"
#include "bsl_uio.h"
#include "uio_abstraction.h"
// The write behavior must be the same.
#define UIO_BUFFER_DEFAULT_SIZE 4096
#define DTLS_MIN_MTU 256 /* Minimum MTU setting size */
#define DTLS_MAX_MTU_OVERHEAD 48 /* Highest MTU overhead, IPv6 40 + UDP 8 */
typedef struct {
uint32_t outSize;
// This variable will make the write() logic consistent with the ossl. Reason:
// 1) The handshake logic is complex.
// 2) The behavior consistency problem of the handshake logic is difficult to locate.
uint32_t outOff;
uint32_t outLen;
uint8_t *outBuf;
} BufferCtx;
/* Create hook: allocate the buffering context and its default-size buffer. */
static int32_t BufferCreate(BSL_UIO *uio)
{
    if (uio == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    BufferCtx *bufCtx = BSL_SAL_Calloc(1, sizeof(BufferCtx));
    if (bufCtx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_MALLOC_FAIL);
        return BSL_MALLOC_FAIL;
    }
    bufCtx->outBuf = (uint8_t *)BSL_SAL_Malloc(UIO_BUFFER_DEFAULT_SIZE);
    if (bufCtx->outBuf == NULL) {
        BSL_SAL_FREE(bufCtx);
        BSL_ERR_PUSH_ERROR(BSL_MALLOC_FAIL);
        return BSL_MALLOC_FAIL;
    }
    bufCtx->outSize = UIO_BUFFER_DEFAULT_SIZE;
    BSL_UIO_SetCtx(uio, bufCtx);
    uio->init = 1;
    return BSL_SUCCESS;
}
/* Destroy hook: release the buffering context and reset UIO state. */
static int32_t BufferDestroy(BSL_UIO *uio)
{
    if (uio == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    BufferCtx *bufCtx = BSL_UIO_GetCtx(uio);
    if (bufCtx != NULL) {
        BSL_SAL_FREE(bufCtx->outBuf);
        BSL_SAL_FREE(bufCtx);
        BSL_UIO_SetCtx(uio, NULL);
    }
    uio->init = 0;
    uio->flags = 0;
    return BSL_SUCCESS;
}
/* Push every buffered byte down to the next UIO; resets offsets when drained. */
static int32_t BufferFlushInternal(BSL_UIO *uio)
{
    BufferCtx *bufCtx = BSL_UIO_GetCtx(uio);
    if (bufCtx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    while (bufCtx->outLen > 0) {
        uint32_t sent = 0;
        int32_t ret = BSL_UIO_Write(uio->next, &bufCtx->outBuf[bufCtx->outOff], bufCtx->outLen, &sent);
        if (ret != BSL_SUCCESS) {
            uio->flags = uio->next->flags; /* propagate retry flags from below */
            return ret;
        }
        if (sent == 0) {
            BSL_ERR_PUSH_ERROR(BSL_UIO_IO_BUSY);
            return BSL_UIO_IO_BUSY;
        }
        bufCtx->outOff += sent;
        bufCtx->outLen -= sent;
    }
    bufCtx->outOff = 0;
    bufCtx->outLen = 0;
    return BSL_SUCCESS;
}
/* BSL_UIO_FLUSH handler: drain the buffer, then flush the next UIO. */
static int32_t BufferFlush(BSL_UIO *uio, int32_t larg, void *parg)
{
    if (uio == NULL || uio->next == NULL || uio->ctx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    BufferCtx *bufCtx = BSL_UIO_GetCtx(uio);
    if (bufCtx->outLen != 0) {
        (void)BSL_UIO_ClearFlags(uio, (BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY));
        int32_t ret = BufferFlushInternal(uio);
        if (ret != BSL_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
    }
    return BSL_UIO_Ctrl(uio->next, BSL_UIO_FLUSH, larg, parg);
}
/* BSL_UIO_RESET handler: drop any buffered data and reset the next UIO. */
static int32_t BufferReset(BSL_UIO *uio)
{
    if (uio == NULL || uio->ctx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    BufferCtx *bufCtx = uio->ctx;
    bufCtx->outOff = 0;
    bufCtx->outLen = 0;
    if (uio->next == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    return BSL_UIO_Ctrl(uio->next, BSL_UIO_RESET, 0, NULL);
}
/* BSL_UIO_SET_BUFFER_SIZE handler: resize the write buffer.
 * parg points to a uint32_t holding the new size, which must be at least
 * DTLS_MIN_MTU - DTLS_MAX_MTU_OVERHEAD. Buffered but unsent data is discarded.
 * On allocation failure the old buffer and size are kept intact. */
static int32_t BufferSetBufferSize(BSL_UIO *uio, int32_t larg, void *parg)
{
    if (larg != (int32_t)sizeof(uint32_t) || parg == NULL || *(uint32_t *)parg < DTLS_MIN_MTU - DTLS_MAX_MTU_OVERHEAD) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    BufferCtx *ctx = BSL_UIO_GetCtx(uio);
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    uint32_t len = *(uint32_t *)parg;
    /* Allocate the replacement before freeing the old buffer so the context
     * never holds a NULL buffer with a stale non-zero outSize on OOM. */
    uint8_t *newBuf = (uint8_t *)BSL_SAL_Malloc(len);
    if (newBuf == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_MALLOC_FAIL);
        return BSL_MALLOC_FAIL;
    }
    BSL_SAL_FREE(ctx->outBuf);
    ctx->outBuf = newBuf;
    ctx->outOff = 0;
    ctx->outLen = 0;
    ctx->outSize = len;
    return BSL_SUCCESS;
}
/* Ctrl dispatch for the buffer UIO; unknown commands go to the next UIO. */
static int32_t BufferCtrl(BSL_UIO *uio, int32_t cmd, int32_t larg, void *parg)
{
    if (cmd == BSL_UIO_FLUSH) {
        return BufferFlush(uio, larg, parg);
    }
    if (cmd == BSL_UIO_RESET) {
        return BufferReset(uio);
    }
    if (cmd == BSL_UIO_SET_BUFFER_SIZE) {
        return BufferSetBufferSize(uio, larg, parg);
    }
    if (uio->next != NULL) { /* unknown command: delegate downwards */
        return BSL_UIO_Ctrl(uio->next, cmd, larg, parg);
    }
    BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
    return BSL_UIO_FAIL;
}
/* Copy as much of the input as fits into the buffer's free tail space,
 * advancing outLen and the caller's written-byte counter. */
static int32_t TryCompleteBuffer(BufferCtx *ctx, const void *in, uint32_t remain, uint32_t *writeLen)
{
    uint32_t freeSpace = ctx->outSize - (ctx->outOff + ctx->outLen);
    if (freeSpace == 0) {
        return BSL_SUCCESS; /* nothing fits; not an error */
    }
    uint32_t copyLen = (remain < freeSpace) ? remain : freeSpace;
    if (memcpy_s(&ctx->outBuf[ctx->outOff + ctx->outLen], freeSpace, in, copyLen) != EOK) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        return BSL_UIO_IO_EXCEPTION;
    }
    ctx->outLen += copyLen;
    *writeLen += copyLen;
    return BSL_SUCCESS;
}
/* Buffering write: small writes are accumulated; when the buffer would
 * overflow, buffered bytes are flushed first and buffer-sized (or larger)
 * remainders are sent straight to the next UIO without copying.
 * A busy lower UIO yields BSL_SUCCESS with a partial *writeLen. */
static int32_t BufferWrite(BSL_UIO *uio, const void *buf, uint32_t len, uint32_t *writeLen)
{
    bool invalid = (uio == NULL) || (buf == NULL) || (writeLen == NULL) || (uio->next == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    *writeLen = 0;
    BufferCtx *ctx = BSL_UIO_GetCtx(uio);
    invalid = (ctx == NULL) || (ctx->outBuf == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    (void)BSL_UIO_ClearFlags(uio, (BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY));
    const uint8_t *in = buf;
    uint32_t remain = len;
    while (remain > 0) {
        const uint32_t freeSpace = ctx->outSize - (ctx->outOff + ctx->outLen);
        if (freeSpace >= remain) { // If the space is sufficient, cache the data.
            return TryCompleteBuffer(ctx, in, remain, writeLen);
        }
        // else: space is insufficient
        if (ctx->outLen > 0) { // buffer already has data, need to send the existing data first.
            int32_t ret = BufferFlushInternal(uio);
            // If next uio return busy, return success, upper layer will return busy
            if (ret == BSL_UIO_IO_BUSY) {
                return BSL_SUCCESS;
            }
            if (ret != BSL_SUCCESS) {
                return ret;
            }
        }
        ctx->outOff = 0;
        // Chunks at least as large as the buffer bypass it entirely.
        while (remain >= ctx->outSize) {
            uint32_t tmpWriteLen = 0;
            int32_t ret = BSL_UIO_Write(uio->next, in, remain, &tmpWriteLen);
            if (ret != BSL_SUCCESS || tmpWriteLen == 0) {
                uio->flags = uio->next->flags; // surface retry state from below
                return ret;
            }
            *writeLen += tmpWriteLen;
            in = &in[tmpWriteLen];
            remain -= tmpWriteLen;
        }
    }
    return BSL_SUCCESS;
}
const BSL_UIO_Method *BSL_UIO_BufferMethod(void)
{
static const BSL_UIO_Method m = {
BSL_UIO_BUFFER,
BufferWrite,
NULL,
BufferCtrl,
NULL,
NULL,
BufferCreate,
BufferDestroy,
};
return &m;
}
#endif /* HITLS_BSL_UIO_BUFFER */
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/src/uio_buffer.c | C | unknown | 8,257 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_FILE
#include <stdio.h>
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_err_internal.h"
#include "bsl_errno.h"
#include "uio_base.h"
#include "uio_abstraction.h"
#include "sal_file.h"
#include "bsl_uio.h"
/* Write len bytes to the underlying file; reports len on success. */
static int32_t FileWrite(BSL_UIO *uio, const void *buf, uint32_t len, uint32_t *writeLen)
{
    bsl_sal_file_handle file = BSL_UIO_GetCtx(uio);
    if (file == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    *writeLen = 0;
    if (len == 0) {
        return BSL_SUCCESS; /* nothing to do */
    }
    if (BSL_SAL_FileWrite(file, buf, 1, len) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    *writeLen = len;
    return BSL_SUCCESS;
}
/* Read up to len bytes from the underlying file; reports the actual count. */
static int32_t FileRead(BSL_UIO *uio, void *buf, uint32_t len, uint32_t *readLen)
{
    bsl_sal_file_handle file = BSL_UIO_GetCtx(uio);
    if (file == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    *readLen = 0;
    size_t actual;
    if (BSL_SAL_FileRead(file, buf, 1, len, &actual) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    *readLen = (uint32_t)actual;
    return BSL_SUCCESS;
}
/* Destroy hook: close the handle only when this UIO owns it. */
static int32_t FileDestroy(BSL_UIO *uio)
{
    if (BSL_UIO_GetIsUnderlyingClosedByUio(uio)) {
        bsl_sal_file_handle handle = BSL_UIO_GetCtx(uio);
        if (handle != NULL) {
            BSL_SAL_FileClose(handle);
            BSL_UIO_SetCtx(uio, NULL);
        }
    }
    uio->init = false;
    return BSL_SUCCESS;
}
/* Translate BSL_UIO_FILE_* flags into an fopen mode and open filename.
 * An existing owned handle is closed first; an unowned one is an error. */
static int32_t FileOpen(BSL_UIO *uio, uint32_t flags, const char *filename)
{
    if (filename == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    bsl_sal_file_handle oldHandle = BSL_UIO_GetCtx(uio);
    if (oldHandle != NULL) {
        if (!BSL_UIO_GetIsUnderlyingClosedByUio(uio)) {
            return BSL_UIO_EXIST_CONTEXT_NOT_RELEASED;
        }
        BSL_SAL_FileClose(oldHandle);
        BSL_UIO_SetCtx(uio, NULL);
    }
    /* Append takes precedence, then read (optionally with write), then write. */
    const char *mode = NULL;
    if ((flags & BSL_UIO_FILE_APPEND) != 0) {
        mode = ((flags & BSL_UIO_FILE_READ) != 0) ? "a+" : "a";
    } else if ((flags & BSL_UIO_FILE_READ) != 0) {
        mode = ((flags & BSL_UIO_FILE_WRITE) != 0) ? "r+" : "r";
    } else if ((flags & BSL_UIO_FILE_WRITE) != 0) {
        mode = "w";
    }
    if (mode == NULL) { /* no usable access flag given */
        BSL_ERR_PUSH_ERROR(BSL_UIO_FILE_OPEN_FAIL);
        return BSL_UIO_FILE_OPEN_FAIL;
    }
    bsl_sal_file_handle newHandle = NULL;
    if (BSL_SAL_FileOpen(&newHandle, filename, mode) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FILE_OPEN_FAIL);
        return BSL_UIO_FILE_OPEN_FAIL;
    }
    BSL_UIO_SetCtx(uio, (void *)newHandle);
    uio->init = true;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_PENDING handler: report how many readable bytes remain between the
 * current file position and the end of file, via *ret.
 * The file position is saved, the file is seeked to the end to measure its
 * length, and the position is restored before returning.
 *
 * Fix: line "SAL_FileTell(f, ¤t)" was a mis-encoded "&current"
 * (HTML-entity corruption of "&curr"); restored the address-of operator.
 */
static int32_t FilePending(BSL_UIO *uio, int32_t larg, uint64_t *ret)
{
    if (ret == NULL || larg != sizeof(*ret)) {
        return BSL_INVALID_ARG;
    }
    bsl_sal_file_handle f = BSL_UIO_GetCtx(uio);
    if (f == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    long current; // save the current position so it can be restored afterwards
    if (SAL_FileTell(f, &current) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION;
    }
    if (SAL_FileSeek(f, 0, SEEK_END) != BSL_SUCCESS) { // move to the end
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION;
    }
    long max; // get the length
    if (SAL_FileTell(f, &max) != BSL_SUCCESS || max < current) { // error, including < 0, should restore the current
        (void)SAL_FileSeek(f, current, SEEK_SET);
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION;
    }
    *ret = (uint64_t)(max - current); // save the remaining length
    (void)SAL_FileSeek(f, current, SEEK_SET); // recover it
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_WPENDING handler: a file UIO buffers no outgoing data, so the
 * pending-write count reported through *ret is always 0.
 */
static int32_t FileWpending(int32_t larg, int64_t *ret)
{
    bool badArg = (ret == NULL) || (larg != sizeof(int64_t));
    if (badArg) {
        return BSL_INVALID_ARG;
    }
    *ret = 0; // file UIO never has unflushed write data of its own
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_FILE_PTR handler: attach an externally opened FILE* to the UIO.
 * isClosed (0 or 1) states whether the UIO should close it on destruction.
 * A previously attached handle is closed first when owned; a non-owned one
 * causes BSL_UIO_EXIST_CONTEXT_NOT_RELEASED.
 */
static int32_t FileSetPtr(BSL_UIO *uio, int32_t isClosed, FILE *fp)
{
    bool invalid = (fp == NULL) || (isClosed != 0 && isClosed != 1);
    if (invalid) {
        return BSL_INVALID_ARG;
    }
    bsl_sal_file_handle oldHandle = BSL_UIO_GetCtx(uio);
    if (oldHandle != NULL) {
        if (!BSL_UIO_GetIsUnderlyingClosedByUio(uio)) {
            return BSL_UIO_EXIST_CONTEXT_NOT_RELEASED;
        }
        BSL_SAL_FileClose(oldHandle);
        BSL_UIO_SetCtx(uio, NULL);
    }
    BSL_UIO_SetCtx(uio, fp);
    BSL_UIO_SetIsUnderlyingClosedByUio(uio, isClosed);
    uio->init = true;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_RESET handler: rewind the attached file to its beginning.
 *
 * Fixes: the local was declared "bsl_sal_file_handle *f" although
 * BSL_UIO_GetCtx() stores the handle itself (every sibling here uses
 * "bsl_sal_file_handle f"); also compare the seek result against
 * BSL_SUCCESS for consistency with the rest of the file instead of raw 0.
 */
static int32_t FileReset(BSL_UIO *uio)
{
    if (BSL_UIO_GetCtx(uio) == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    bsl_sal_file_handle f = BSL_UIO_GetCtx(uio);
    if (SAL_FileSeek(f, 0, SEEK_SET) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_INTERNAL_EXCEPTION);
        return BSL_INTERNAL_EXCEPTION;
    }
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_FILE_GET_EOF handler: set *isEof when the stdio stream's EOF
 * indicator is raised. larg must be 1 (caller-side size convention).
 */
static int32_t FileGetEof(BSL_UIO *uio, int32_t larg, bool *isEof)
{
    bool invalid = (larg != 1) || (isEof == NULL);
    if (invalid) {
        return BSL_INVALID_ARG;
    }
    *isEof = false;
    FILE *stream = BSL_UIO_GetCtx(uio);
    if (stream == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    *isEof = (feof(stream) != 0);
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_FLUSH handler: push buffered writes of the attached stream to the
 * OS. Returns BSL_UIO_IO_EXCEPTION when the flush reports failure.
 */
static int32_t FileFlush(BSL_UIO *uio)
{
    FILE *stream = BSL_UIO_GetCtx(uio);
    if (stream == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    return SAL_Flush(stream) ? BSL_SUCCESS : BSL_UIO_IO_EXCEPTION;
}
/**
 * Control dispatcher for the file UIO. Rejects negative larg up front, then
 * routes each supported command to its handler; unknown commands fail with
 * BSL_UIO_FAIL.
 */
static int32_t FileCtrl(BSL_UIO *uio, int32_t cmd, int32_t larg, void *parg)
{
    if (larg < 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    switch (cmd) {
        case BSL_UIO_FILE_OPEN:
            return FileOpen(uio, (uint32_t)larg, parg);
        case BSL_UIO_PENDING:
            return FilePending(uio, larg, parg);
        case BSL_UIO_WPENDING:
            return FileWpending(larg, parg);
        case BSL_UIO_FILE_PTR:
            return FileSetPtr(uio, larg, parg);
        case BSL_UIO_RESET:
            return FileReset(uio);
        case BSL_UIO_FILE_GET_EOF:
            return FileGetEof(uio, larg, parg);
        case BSL_UIO_FLUSH:
            return FileFlush(uio);
        default:
            // unsupported command for a file UIO
            BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
            return BSL_UIO_FAIL;
    }
}
/**
 * Read one line (up to *readLen - 1 chars, per fgets semantics of the SAL
 * wrapper) into buf. On return *readLen is the string length read; 0 with
 * BSL_SUCCESS means EOF, while a genuine stream error sets the retry flags
 * and returns BSL_UIO_FAIL.
 */
static int32_t FileGets(BSL_UIO *uio, char *buf, uint32_t *readLen)
{
    bsl_sal_file_handle handle = BSL_UIO_GetCtx(uio);
    if (handle == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    (void)BSL_UIO_ClearFlags(uio, BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY);
    if (SAL_FGets(handle, buf, (int32_t)*readLen) != NULL) {
        *readLen = (uint32_t)strlen(buf);
        return BSL_SUCCESS;
    }
    *readLen = 0;
    if (SAL_FileError(handle) == false) { // reached end of file: not an error
        return BSL_SUCCESS;
    }
    (void)BSL_UIO_SetFlags(uio, BSL_UIO_FLAGS_READ | BSL_UIO_FLAGS_SHOULD_RETRY);
    BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
    return BSL_UIO_FAIL;
}
/**
 * Write the NUL-terminated string buf to the file UIO. A NULL buf writes
 * zero bytes; *writeLen receives the count actually written by FileWrite.
 */
static int32_t FilePuts(BSL_UIO *uio, const char *buf, uint32_t *writeLen)
{
    uint32_t srcLen = (buf == NULL) ? 0 : (uint32_t)strlen(buf);
    return FileWrite(uio, buf, srcLen, writeLen);
}
/**
 * Return the method table for a file-backed UIO.
 * The table is a function-scope static, so the returned pointer is valid for
 * the lifetime of the program and must not be freed by the caller.
 * Entry order follows BSL_UIO_Method: type, write, read, ctrl, puts, gets,
 * create (none for file UIO), destroy.
 */
const BSL_UIO_Method *BSL_UIO_FileMethod(void)
{
    static const BSL_UIO_Method METHOD = {
        BSL_UIO_FILE,
        FileWrite,
        FileRead,
        FileCtrl,
        FilePuts,
        FileGets,
        NULL,       // no create hook: the context is set via FileOpen/FileSetPtr
        FileDestroy,
    };
    return &METHOD;
}
#endif /* HITLS_BSL_UIO_FILE */
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/src/uio_file.c | C | unknown | 8,548 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_MEM
#include "securec.h"
#include "bsl_buffer.h"
#include "bsl_errno.h"
#include "bsl_err_internal.h"
#include "uio_base.h"
#include "uio_abstraction.h"
#include "bsl_uio.h"
/* Context of a memory-backed UIO: a growable buffer plus a read cursor. */
typedef struct {
    BSL_BufMem *buf;      // backing storage; owned in read/write mode, user-owned in read-only mode
    BSL_BufMem *tmpBuf;   // only used in read-only mode (shadow view handed out by MemGetPtr)
    size_t readIndex;     // offset of the next unread byte within buf
    int32_t eof; // Behavior when reading empty memory. If the value is not 0, retry will be set.
} UIO_BufMem;
/**
 * BSL_UIO_MEM_NEW_BUF handler: attach user-supplied memory (buf, len) to the
 * UIO and switch it into read-only mode. The UIO does not take ownership of
 * buf. Fails with BSL_UIO_MEM_NOT_NULL if internally allocated read/write
 * data is still attached, because it would leak if silently overwritten.
 */
static int32_t MemNewBuf(BSL_UIO *uio, int32_t len, void *buf)
{
    if (buf == NULL || len < 0) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    UIO_BufMem *ubm = BSL_UIO_GetCtx(uio);
    if (ubm == NULL || ubm->buf == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    BSL_BufMem *bm = ubm->buf;
    if (bm->data != NULL && (uio->flags & BSL_UIO_FLAGS_MEM_READ_ONLY) == 0) {
        /* If the UIO mode is not read-only, need to release the memory first.
         * Otherwise, the internal memory applied for the read/write mode will be overwritten,
         */
        BSL_ERR_PUSH_ERROR(BSL_UIO_MEM_NOT_NULL);
        return BSL_UIO_MEM_NOT_NULL;
    }
    if (ubm->tmpBuf == NULL) {
        // lazily create the shadow buffer that MemGetPtr hands out in read-only mode
        ubm->tmpBuf = BSL_BufMemNew();
        if (ubm->tmpBuf == NULL) {
            BSL_ERR_PUSH_ERROR(BSL_MALLOC_FAIL);
            return BSL_MALLOC_FAIL;
        }
    }
    ubm->readIndex = 0;
    ubm->eof = 0; // Read-only memory, and retry is not required.
    bm->length = (size_t)len;
    bm->max = (size_t)len;
    bm->data = (void *)buf;
    // replace (not OR) the flags: the UIO is now purely read-only memory
    uio->flags = BSL_UIO_FLAGS_MEM_READ_ONLY;
    return BSL_SUCCESS;
}
/**
 * Compact the memory buffer: discard the bytes already consumed by the
 * reader by shifting the unread tail to the front and resetting readIndex.
 * A NULL context or a zero read index is a no-op.
 */
static int32_t UioBufMemSync(UIO_BufMem *ubm)
{
    if (ubm == NULL || ubm->readIndex == 0) {
        return BSL_SUCCESS;
    }
    size_t remain = ubm->buf->length - ubm->readIndex;
    if (memmove_s(ubm->buf->data, ubm->buf->length, ubm->buf->data + ubm->readIndex, remain) != EOK) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    ubm->buf->length = remain;
    ubm->readIndex = 0;
    return BSL_SUCCESS;
}
/**
 * Append len bytes from buf to the memory UIO. Rejected in read-only mode.
 * The buffer is first compacted (consumed bytes dropped), then grown to fit
 * the new data. *writeLen is len on success, 0 otherwise.
 */
static int32_t MemWrite(BSL_UIO *uio, const void *buf, uint32_t len, uint32_t *writeLen)
{
    if (BSL_UIO_GetCtx(uio) == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if ((uio->flags & BSL_UIO_FLAGS_MEM_READ_ONLY) != 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_WRITE_NOT_ALLOWED);
        return BSL_UIO_WRITE_NOT_ALLOWED;
    }
    (void)BSL_UIO_ClearFlags(uio, BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY);
    *writeLen = 0;
    if (len == 0) {
        return BSL_SUCCESS;
    }
    UIO_BufMem *ubm = BSL_UIO_GetCtx(uio);
    if (ubm == NULL || ubm->buf == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    // drop already-read bytes so growth only covers live data + new data
    if (UioBufMemSync(ubm) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_MEMMOVE_FAIL);
        return BSL_MEMMOVE_FAIL;
    }
    const size_t origLen = ubm->buf->length;
    if (BSL_BufMemGrowClean(ubm->buf, origLen + len) == 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_MEM_GROW_FAIL);
        return BSL_UIO_MEM_GROW_FAIL;
    }
    // memory grow guarantee of success here
    (void)memcpy_s(ubm->buf->data + origLen, len, buf, len);
    *writeLen = len;
    return BSL_SUCCESS;
}
/**
 * Read up to len bytes from the memory UIO into buf, advancing readIndex.
 * A short or empty read is still BSL_SUCCESS; when nothing was read and the
 * eof mode is non-zero, the read/retry flags are set so the caller retries.
 */
static int32_t MemRead(BSL_UIO *uio, void *buf, uint32_t len, uint32_t *readLen)
{
    if (BSL_UIO_GetCtx(uio) == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    (void)BSL_UIO_ClearFlags(uio, BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY);
    *readLen = 0;
    UIO_BufMem *ubm = BSL_UIO_GetCtx(uio);
    if (ubm == NULL || ubm->buf == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    // clamp the request to the unread remainder of the buffer
    size_t real = (size_t)len;
    if (real > ubm->buf->length - ubm->readIndex) {
        real = ubm->buf->length - ubm->readIndex;
    }
    if (buf != NULL && real > 0) {
        (void)memcpy_s(buf, len, ubm->buf->data + ubm->readIndex, real);
        ubm->readIndex += real;
        *readLen = (uint32_t)real;
    }
    if (*readLen > 0) {
        return BSL_SUCCESS;
    }
    /* when real equals 0, it is necessary to determine whether to retry based on eof */
    if (ubm->eof != 0) { // retry if eof is not zero
        (void)BSL_UIO_SetFlags(uio, BSL_UIO_FLAGS_READ | BSL_UIO_FLAGS_SHOULD_RETRY);
    }
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_PENDING handler: report the number of unread bytes in the memory
 * buffer via *ret. larg must equal sizeof(uint64_t).
 */
static int32_t MemPending(BSL_UIO *uio, int32_t larg, uint64_t *ret)
{
    if (ret == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (larg != sizeof(uint64_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    UIO_BufMem *ctx = BSL_UIO_GetCtx(uio);
    bool broken = (ctx == NULL) || (ctx->buf == NULL);
    if (broken) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    *ret = (uint64_t)(ctx->buf->length - ctx->readIndex);
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_WPENDING handler: a memory UIO never has unflushed write data,
 * so *ret is always 0. larg must equal sizeof(uint64_t).
 */
static int32_t MemWpending(int32_t larg, uint64_t *ret)
{
    if (ret == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (larg != sizeof(uint64_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    *ret = 0; // For the UIO of the mem type, return 0
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_MEM_GET_INFO handler: expose a direct (non-owning) view of the
 * unread region — param->data points into the buffer at the read cursor and
 * param->size is the unread byte count.
 */
static int32_t MemGetInfo(BSL_UIO *uio, int32_t larg, BSL_UIO_CtrlGetInfoParam *param)
{
    if (param == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (larg != sizeof(BSL_UIO_CtrlGetInfoParam)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    UIO_BufMem *ctx = BSL_UIO_GetCtx(uio);
    bool broken = (ctx == NULL) || (ctx->buf == NULL);
    if (broken) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    param->data = (uint8_t *)(&ctx->buf->data[ctx->readIndex]);
    param->size = ctx->buf->length - ctx->readIndex;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_MEM_GET_PTR handler: hand out a BSL_BufMem view of the unread data.
 * In read/write mode the real (compacted) buffer is returned; in read-only
 * mode a non-owning tmpBuf view into the user memory is returned instead, so
 * the user's buffer bookkeeping is never mutated.
 */
static int32_t MemGetPtr(BSL_UIO *uio, int32_t size, BSL_BufMem **ptr)
{
    if (ptr == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (size != sizeof(BSL_BufMem *)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    UIO_BufMem *ubm = BSL_UIO_GetCtx(uio);
    if (ubm == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    if ((uio->flags & BSL_UIO_FLAGS_MEM_READ_ONLY) == 0) {
        // read/write mode: compact first so the buffer starts at unread data
        if (UioBufMemSync(ubm) != BSL_SUCCESS) {
            BSL_ERR_PUSH_ERROR(BSL_MEMMOVE_FAIL);
            return BSL_MEMMOVE_FAIL;
        }
        *ptr = ubm->buf;
    } else {
        /* when reset to read-only mode, can read from the beginning.
         * Temporary buf is not used to manage memory. */
        ubm->tmpBuf->data = ubm->buf->data + ubm->readIndex;
        ubm->tmpBuf->length = ubm->buf->length - ubm->readIndex;
        ubm->tmpBuf->max = ubm->buf->max - ubm->readIndex;
        *ptr = ubm->tmpBuf;
    }
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_MEM_SET_EOF handler: configure the empty-read behavior. A
 * non-zero *eof makes future empty reads request a retry (see MemRead).
 */
static int32_t MemSetEof(BSL_UIO *uio, int32_t larg, const int32_t *eof)
{
    if (eof == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (larg != (int32_t)sizeof(int32_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    UIO_BufMem *ctx = BSL_UIO_GetCtx(uio);
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    ctx->eof = *eof;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_MEM_GET_EOF handler: read back the empty-read behavior set by
 * MemSetEof into *eof.
 */
static int32_t MemGetEof(BSL_UIO *uio, int32_t larg, int32_t *eof)
{
    if (eof == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (larg != (int32_t)sizeof(int32_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    UIO_BufMem *ctx = BSL_UIO_GetCtx(uio);
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    *eof = ctx->eof;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_RESET handler. Read-only mode just rewinds the read cursor so
 * the same data can be read again; read/write mode wipes the buffer
 * contents and empties it.
 */
static int32_t MemReset(BSL_UIO *uio)
{
    UIO_BufMem *ctx = BSL_UIO_GetCtx(uio);
    bool broken = (ctx == NULL) || (ctx->buf == NULL) || (ctx->buf->data == NULL);
    if (broken) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    bool readOnly = (uio->flags & BSL_UIO_FLAGS_MEM_READ_ONLY) != 0;
    if (readOnly) {
        // Read-only mode: The read index is reset and data can be read again
        ctx->readIndex = 0;
    } else {
        // Read/Write mode: Clear all data
        (void)memset_s(ctx->buf->data, ctx->buf->max, 0, ctx->buf->max);
        ctx->buf->length = 0;
        ctx->readIndex = 0;
    }
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_FLUSH handler: a memory UIO has nothing to flush, so this only
 * validates the arguments (parg must be NULL and larg 0) and succeeds.
 */
static int32_t MemFlush(int32_t larg, void *parg)
{
    bool badArg = (parg != NULL) || (larg != 0);
    if (badArg) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    return BSL_SUCCESS;
}
/**
 * Control dispatcher for the memory UIO; routes each supported command to
 * its handler and fails with BSL_UIO_FAIL on anything else.
 */
static int32_t MemCtrl(BSL_UIO *uio, int32_t cmd, int32_t larg, void *parg)
{
    switch (cmd) {
        case BSL_UIO_MEM_NEW_BUF:
            return MemNewBuf(uio, larg, parg);
        case BSL_UIO_MEM_GET_PTR:
            return MemGetPtr(uio, larg, parg);
        case BSL_UIO_MEM_GET_INFO:
            return MemGetInfo(uio, larg, parg);
        case BSL_UIO_MEM_SET_EOF:
            return MemSetEof(uio, larg, parg);
        case BSL_UIO_MEM_GET_EOF:
            return MemGetEof(uio, larg, parg);
        case BSL_UIO_PENDING:
            return MemPending(uio, larg, parg);
        case BSL_UIO_WPENDING:
            return MemWpending(larg, parg);
        case BSL_UIO_FLUSH:
            return MemFlush(larg, parg);
        case BSL_UIO_RESET:
            return MemReset(uio);
        default:
            BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
            return BSL_UIO_FAIL;
    }
}
/**
 * Append the NUL-terminated string buf to the memory UIO. A NULL buf
 * writes zero bytes; *writeLen receives the count MemWrite reports.
 */
static int32_t MemPuts(BSL_UIO *uio, const char *buf, uint32_t *writeLen)
{
    uint32_t srcLen = (buf == NULL) ? 0 : (uint32_t)strlen(buf);
    return MemWrite(uio, buf, srcLen, writeLen);
}
/**
 * Read one line (up to and including '\n', or until data/space runs out)
 * from the memory UIO into buf. On entry *readLen is the buffer capacity
 * (one byte reserved for the terminator); on exit it is the number of
 * characters read. buf is always NUL-terminated on success.
 */
static int32_t MemGets(BSL_UIO *uio, char *buf, uint32_t *readLen)
{
    uint32_t cnt = 0;
    int32_t ret;
    UIO_BufMem *ubm = BSL_UIO_GetCtx(uio);
    if (ubm == NULL || ubm->buf == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (*readLen == 0) {
        return BSL_SUCCESS;
    }
    // usable length: min(capacity - 1 for the terminator, unread bytes)
    uint32_t len = (uint32_t)((*readLen - 1) >= (ubm->buf->length - ubm->readIndex) ?
        (ubm->buf->length - ubm->readIndex) : (*readLen - 1));
    if (len == 0) { /* No data to read */
        *buf = '\0';
        *readLen = 0;
        return BSL_SUCCESS;
    }
    // scan forward for the newline (inclusive) to fix the read length
    char *pre = ubm->buf->data + ubm->readIndex;
    /* len greater than 0 */
    while (cnt < len) {
        cnt++;
        if (*pre++ == '\n') {
            break;
        }
    }
    // MemRead advances readIndex and sets *readLen to the bytes copied
    ret = MemRead(uio, buf, cnt, readLen);
    if (ret == BSL_SUCCESS) {
        buf[cnt] = '\0'; // cnt <= *readLen - 1, so this stays in bounds
    }
    return ret;
}
/**
 * Tear down the memory UIO context. In read-only mode the data pointers are
 * cleared before freeing the BSL_BufMem wrappers, because the data belongs
 * to the user (attached via MemNewBuf) and must not be freed here. Frees
 * happen only when the UIO owns its underlying context.
 */
static int32_t MemDestroy(BSL_UIO *uio)
{
    UIO_BufMem *ubm = BSL_UIO_GetCtx(uio);
    if (BSL_UIO_GetIsUnderlyingClosedByUio(uio) && ubm != NULL) {
        if ((uio->flags & BSL_UIO_FLAGS_MEM_READ_ONLY) != 0) {
            // detach user-owned memory so BSL_BufMemFree does not free it
            ubm->buf->data = NULL;
            if (ubm->tmpBuf != NULL) {
                ubm->tmpBuf->data = NULL; // tmpBuf only aliased the user buffer
                BSL_BufMemFree(ubm->tmpBuf);
            }
        }
        BSL_BufMemFree(ubm->buf);
        BSL_SAL_FREE(ubm);
    }
    BSL_UIO_SetCtx(uio, NULL);
    uio->init = false;
    return BSL_SUCCESS;
}
/**
 * Create hook for the memory UIO: allocate the UIO_BufMem context and its
 * backing buffer, mark empty reads as retryable (eof = -1), and hand
 * ownership of the context to the UIO.
 */
static int32_t MemCreate(BSL_UIO *uio)
{
    UIO_BufMem *ctx = (UIO_BufMem *)BSL_SAL_Calloc(1, sizeof(UIO_BufMem));
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_MALLOC_FAIL);
        return BSL_MALLOC_FAIL;
    }
    ctx->buf = BSL_BufMemNew();
    if (ctx->buf == NULL) {
        BSL_SAL_FREE(ctx);
        BSL_ERR_PUSH_ERROR(BSL_MALLOC_FAIL);
        return BSL_MALLOC_FAIL;
    }
    ctx->eof = -1; // default: empty reads ask the caller to retry
    BSL_UIO_SetCtx(uio, ctx);
    BSL_UIO_SetIsUnderlyingClosedByUio(uio, true); // memory buffer is created here and will be closed here by default.
    uio->init = true;
    return BSL_SUCCESS;
}
/**
 * Return the method table for a memory-backed UIO.
 * The table is a function-scope static, so the returned pointer is valid
 * for the lifetime of the program and must not be freed by the caller.
 */
const BSL_UIO_Method *BSL_UIO_MemMethod(void)
{
    static const BSL_UIO_Method method = {
        BSL_UIO_MEM,
        MemWrite,
        MemRead,
        MemCtrl,
        MemPuts,
        MemGets,
        MemCreate,
        MemDestroy
    };
    return &method;
}
#endif /* HITLS_BSL_UIO_MEM */
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/src/uio_mem.c | C | unknown | 12,802 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_SCTP
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_binlog_id.h"
#include "bsl_log_internal.h"
#include "bsl_log.h"
#include "bsl_err_internal.h"
#include "bsl_errno.h"
#include "bsl_uio.h"
#include "sal_net.h"
#include "uio_base.h"
#include "uio_abstraction.h"
#define SCTP_SHARE_AUTHKEY_ID_MAX 65535
/* SCTP-AUTH and stream-id state carried by an SCTP UIO. */
typedef struct {
    bool peerAuthed; /* Whether auth is enabled at the peer end */
    /* Whether authkey is added: If authkey is added but not active, success is returned when authkey is added again. */
    bool isAddAuthkey;
    bool reserved[2]; /* Four-byte alignment is reserved. */
    uint16_t sendAppStreamId; /* ID of the stream sent by the user-specified app. */
    uint16_t prevShareKeyId;  /* previously active shared key id (candidate for deletion) */
    uint16_t shareKeyId;      /* currently registered shared key id */
    uint16_t reserved1; /* Four-byte alignment is reserved. */
} BslSctpData;
/* Context of an SCTP UIO: auth/stream state, socket, peer address and the
 * user-registered low-level read/write/ctrl callbacks. */
typedef struct {
    BslSctpData data;                  // auth-key and stream-id bookkeeping
    int32_t fd; // Network socket
    uint32_t ipLen;                    // 0 means no peer address has been set
    uint8_t ip[IP_ADDR_MAX_LEN];       // peer IP (v4 or v6, see ipLen)
    struct BSL_UIO_MethodStruct method; // user callbacks set via BSL_UIO_SCTP_SET_CALLBACK
    bool isAppMsg; // whether the message sent is the app message
} SctpParameters;
/**
 * Create hook for the SCTP UIO: allocate the SctpParameters context with an
 * invalid fd. The UIO stays uninitialized (init == 0) until a socket is
 * attached via BSL_UIO_SET_FD.
 */
static int32_t BslSctpNew(BSL_UIO *uio)
{
    SctpParameters *param = (SctpParameters *)BSL_SAL_Calloc(1u, sizeof(SctpParameters));
    if (param == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05031, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: sctp param malloc fail.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    param->fd = -1;
    param->method.uioType = BSL_UIO_SCTP;
    uio->ctx = param;
    uio->ctxLen = sizeof(SctpParameters);
    // The default value of init is 0. Set the value of init to 1 after the fd is set.
    return BSL_SUCCESS;
}
/**
 * Tear down the SCTP UIO: close the socket only if the UIO owns it, free
 * the context and mark the UIO uninitialized. NULL uio is a no-op.
 */
static int32_t BslSctpDestroy(BSL_UIO *uio)
{
    if (uio == NULL) {
        return BSL_SUCCESS;
    }
    uio->init = 0;
    SctpParameters *param = BSL_UIO_GetCtx(uio);
    if (param == NULL) {
        return BSL_SUCCESS;
    }
    bool ownsSocket = BSL_UIO_GetIsUnderlyingClosedByUio(uio) && (param->fd != -1);
    if (ownsSocket) {
        (void)BSL_SAL_SockClose(param->fd);
    }
    BSL_SAL_FREE(param);
    BSL_UIO_SetCtx(uio, NULL);
    return BSL_SUCCESS;
}
/**
 * Write entry point of the SCTP UIO: validates the context and forwards to
 * the user-registered low-level write callback. *writeLen is zeroed first.
 */
static int32_t BslSctpWrite(BSL_UIO *uio, const void *buf, uint32_t len, uint32_t *writeLen)
{
    bool invalid = (uio == NULL) || (uio->ctx == NULL) ||
        (((SctpParameters *)uio->ctx)->method.uioWrite == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05081, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Sctp write input error.", 0, 0, 0, 0);
        return BSL_INVALID_ARG;
    }
    *writeLen = 0;
    SctpParameters *param = (SctpParameters *)uio->ctx;
    return param->method.uioWrite(uio, buf, len, writeLen);
}
/**
 * Read entry point of the SCTP UIO. Before the first successful read, the
 * peer's SCTP-AUTH status is verified through the user-registered ctrl
 * callback; once confirmed, the check is cached via data.peerAuthed and the
 * read is forwarded to the registered low-level read callback.
 *
 * Fix: "¶meters->data.peerAuthed" in the ctrl call was a mis-encoded
 * "&parameters->data.peerAuthed" (HTML-entity corruption of "&para");
 * restored the address-of operator.
 */
static int32_t BslSctpRead(BSL_UIO *uio, void *buf, uint32_t len, uint32_t *readLen)
{
    if (uio == NULL || uio->ctx == NULL || ((SctpParameters *)uio->ctx)->method.uioRead == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05082, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Sctp read input error.", 0, 0, 0, 0);
        return BSL_INVALID_ARG;
    }
    *readLen = 0;
    SctpParameters *parameters = (SctpParameters *)uio->ctx;
    if (!parameters->data.peerAuthed) {
        if (parameters->method.uioCtrl == NULL || parameters->method.uioCtrl(uio, BSL_UIO_SCTP_CHECK_PEER_AUTH,
            sizeof(parameters->data.peerAuthed), &parameters->data.peerAuthed) != BSL_SUCCESS) {
            BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
            BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05083, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
                "Uio: Check peer auth failed.", 0, 0, 0, 0);
            return BSL_UIO_IO_EXCEPTION;
        }
        parameters->data.peerAuthed = true; // cache the result: skip the check on later reads
    }
    return parameters->method.uioRead(uio, buf, len, readLen);
}
/**
 * Register a new SCTP-AUTH shared key via the user ctrl callback.
 * parg is the raw key material and larg its size (must equal
 * sizeof(BSL_UIO_SctpAuthKey) by the caller's convention). A new shareKeyId
 * is assigned by incrementing with wrap-around at SCTP_SHARE_AUTHKEY_ID_MAX
 * (wrapping back to 1, never 0). If a key was already added but not yet
 * activated, the call is an idempotent success. On callback failure the
 * previous key id is rolled back.
 */
static int32_t BslSctpAddAuthKey(BSL_UIO *uio, const uint8_t *parg, uint16_t larg)
{
    SctpParameters *parameters = (SctpParameters *)BSL_UIO_GetCtx(uio);
    if (parg == NULL || larg != sizeof(BSL_UIO_SctpAuthKey)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05062, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "add auth key failed", 0, 0, 0, 0);
        return BSL_INVALID_ARG;
    }
    if (parameters->data.isAddAuthkey) {
        // a key is already registered but not activated yet: treat as success
        return BSL_SUCCESS;
    }
    uint16_t prevShareKeyId = parameters->data.shareKeyId;
    if (parameters->data.shareKeyId >= SCTP_SHARE_AUTHKEY_ID_MAX) {
        parameters->data.shareKeyId = 1; // wrap around, skipping id 0
    } else {
        parameters->data.shareKeyId++;
    }
    BSL_UIO_SctpAuthKey key = { 0 };
    key.shareKeyId = parameters->data.shareKeyId;
    key.authKey = parg;
    key.authKeySize = larg;
    int32_t ret = parameters->method.uioCtrl(uio, BSL_UIO_SCTP_ADD_AUTH_SHARED_KEY, (int32_t)sizeof(key), &key);
    if (ret != BSL_SUCCESS) {
        parameters->data.shareKeyId = prevShareKeyId; // roll back the id on failure
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05065, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "add auth key failed", 0, 0, 0, 0);
        return BSL_UIO_IO_EXCEPTION;
    }
    parameters->data.isAddAuthkey = true;
    parameters->data.prevShareKeyId = prevShareKeyId; // remember for later deletion
    return BSL_SUCCESS;
}
/**
 * Activate the most recently added SCTP-AUTH shared key via the user ctrl
 * callback, then clear the pending-add flag so a new key may be registered.
 */
static int32_t BslSctpActiveAuthKey(BSL_UIO *uio)
{
    SctpParameters *param = BSL_UIO_GetCtx(uio);
    bool invalid = (param == NULL) || (param->method.uioCtrl == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    uint16_t keyId = param->data.shareKeyId;
    if (param->method.uioCtrl(uio, BSL_UIO_SCTP_ACTIVE_AUTH_SHARED_KEY,
        (int32_t)sizeof(keyId), &keyId) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05066, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "active auth key failed", 0, 0, 0, 0);
        return BSL_UIO_IO_EXCEPTION;
    }
    param->data.isAddAuthkey = false; // key consumed: allow adding the next one
    return BSL_SUCCESS;
}
/**
 * Delete the previously active SCTP-AUTH shared key (the one superseded by
 * the last add/activate cycle) via the user ctrl callback.
 */
static int32_t BslSctpDelPreAuthKey(BSL_UIO *uio)
{
    SctpParameters *param = BSL_UIO_GetCtx(uio);
    bool invalid = (param == NULL) || (param->method.uioCtrl == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    uint16_t staleKeyId = param->data.prevShareKeyId;
    if (param->method.uioCtrl(uio, BSL_UIO_SCTP_DEL_PRE_AUTH_SHARED_KEY,
        (int32_t)sizeof(staleKeyId), &staleKeyId) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05067, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "del pre auth key failed", 0, 0, 0, 0);
        return BSL_UIO_IO_EXCEPTION;
    }
    return BSL_SUCCESS;
}
/**
 * Query via the user ctrl callback whether the SCTP send buffer is empty,
 * reporting the result as a bool through parg (larg must be sizeof(bool)).
 */
static int32_t BslSctpIsSndBuffEmpty(BSL_UIO *uio, void *parg, int32_t larg)
{
    SctpParameters *param = BSL_UIO_GetCtx(uio);
    bool invalid = (param == NULL) || (param->method.uioCtrl == NULL) ||
        (parg == NULL) || (larg != sizeof(bool));
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    uint8_t emptyFlag = 0;
    if (param->method.uioCtrl(uio, BSL_UIO_SCTP_SND_BUFF_IS_EMPTY,
        (int32_t)sizeof(uint8_t), &emptyFlag) != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05068, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "get sctp status failed", 0, 0, 0, 0);
        return BSL_UIO_IO_EXCEPTION;
    }
    *(bool *)parg = (emptyFlag > 0);
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_SCTP_GET_SEND_STREAM_ID handler: return the stream id for the next
 * send — the user-configured app stream id for app messages, stream 0
 * otherwise. parg must point at a uint16_t and larg must be its size.
 */
static int32_t BslSctpGetSendStreamId(const SctpParameters *parameters, void *parg, int32_t larg)
{
    bool invalid = (larg != (int32_t)sizeof(uint16_t)) || (parg == NULL);
    if (invalid) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05046, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Sctp input err.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    uint16_t *streamIdOut = (uint16_t *)parg;
    *streamIdOut = parameters->isAppMsg ? parameters->data.sendAppStreamId : 0;
    BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05047, BSL_LOG_LEVEL_DEBUG, BSL_LOG_BINLOG_TYPE_RUN,
        "Uio: User Get SCTP send StreamId [%hu].", *streamIdOut, 0, 0, 0);
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_SCTP_SET_APP_STREAM_ID handler: store the stream id that app
 * messages will be sent on. parg must point at a uint16_t and larg must be
 * its size.
 */
int32_t BslSctpSetAppStreamId(SctpParameters *parameters, const void *parg, int32_t larg)
{
    bool invalid = (larg != (int32_t)sizeof(uint16_t)) || (parg == NULL);
    if (invalid) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05048, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Sctp input err.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    parameters->data.sendAppStreamId = *(const uint16_t *)parg;
    BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05055, BSL_LOG_LEVEL_DEBUG, BSL_LOG_BINLOG_TYPE_RUN,
        "Uio: User set SCTP AppStreamId [%hu].", parameters->data.sendAppStreamId, 0, 0, 0);
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_SET_PEER_IP_ADDR handler: record the peer's IP address. size must
 * be exactly an IPv4 or IPv6 address length; the bytes are copied into the
 * context and ipLen updated.
 */
static int32_t BslSctpSetPeerIpAddr(SctpParameters *parameters, const uint8_t *addr, int32_t size)
{
    if (addr == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05049, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: NULL error.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    bool validLen = (size == IP_ADDR_V4_LEN) || (size == IP_ADDR_V6_LEN);
    if (!validLen) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05050, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Set peer ip address input error.", 0, 0, 0, 0);
        return BSL_UIO_FAIL;
    }
    (void)memcpy_s(parameters->ip, sizeof(parameters->ip), addr, size);
    parameters->ipLen = (uint32_t)size;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_GET_PEER_IP_ADDR handler: copy the stored peer IP into
 * para->addr and shrink para->size to the actual length. Fails when no
 * address has been set (ipLen == 0) or the caller's buffer is too small.
 * Note: para is dereferenced only after the parg != NULL short-circuit, so
 * the combined check is safe.
 */
static int32_t BslSctpGetPeerIpAddr(SctpParameters *parameters, void *parg, int32_t larg)
{
    BSL_UIO_CtrlGetPeerIpAddrParam *para = (BSL_UIO_CtrlGetPeerIpAddrParam *)parg;
    if (parg == NULL || larg != (int32_t)sizeof(BSL_UIO_CtrlGetPeerIpAddrParam) ||
        para->addr == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05051, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Get peer ip address input error.", 0, 0, 0, 0);
        return BSL_NULL_INPUT;
    }
    /* Check whether the IP address is set. */
    if (parameters->ipLen == 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        // NOTE(review): log text "already existed" reads inverted for the
        // no-address-set case — confirm against the log dictionary before changing.
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05052, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Ip address is already existed.", 0, 0, 0, 0);
        return BSL_UIO_FAIL;
    }
    if (para->size < parameters->ipLen) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05053, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Ip address length err.", 0, 0, 0, 0);
        return BSL_UIO_FAIL;
    }
    (void)memcpy_s(para->addr, para->size, parameters->ip, parameters->ipLen);
    para->size = parameters->ipLen;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_SET_FD handler: attach a socket fd to the SCTP UIO, closing any
 * previously attached fd the UIO owns, and mark the UIO initialized.
 */
static int32_t BslSctpSetFd(BSL_UIO *uio, void *parg, int32_t larg)
{
    bool invalid = (larg != (int32_t)sizeof(int32_t)) || (parg == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    SctpParameters *param = BSL_UIO_GetCtx(uio);
    bool closeOld = (param->fd != -1) && BSL_UIO_GetIsUnderlyingClosedByUio(uio);
    if (closeOld) {
        (void)BSL_SAL_SockClose(param->fd);
    }
    param->fd = *(int32_t *)parg;
    uio->init = true;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_GET_FD handler: copy the stored socket fd into *parg. parg must
 * point at an int32_t and larg must be its size.
 */
static int32_t BslSctpGetFd(SctpParameters *parameters, void *parg, int32_t larg)
{
    bool invalid = (larg != (int32_t)sizeof(int32_t)) || (parg == NULL);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05054, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "get fd handle invalid parameter.", 0, 0, 0, 0);
        return BSL_INVALID_ARG;
    }
    *(int32_t *)parg = parameters->fd;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_SCTP_MASK_APP_MESSAGE handler: mark whether subsequent sends carry
 * application messages (this selects the stream id in BslSctpGetSendStreamId).
 */
static int32_t BslSctpMaskAppMsg(SctpParameters *parameters, void *parg, int32_t larg)
{
    bool invalid = (parg == NULL) || (larg != sizeof(bool));
    if (invalid) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05030, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "mask app msg failed", 0, 0, 0, 0);
        return BSL_INVALID_ARG;
    }
    parameters->isAppMsg = *(bool *)parg;
    return BSL_SUCCESS;
}
/**
 * BSL_UIO_SCTP_SET_CALLBACK handler: install the user's low-level write,
 * read or ctrl callback into the context's method table, selected by type.
 */
static int32_t BslSctpSetCtxCb(SctpParameters *parameters, int32_t type, void *func)
{
    if (parameters == NULL || func == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (type == BSL_UIO_WRITE_CB) {
        parameters->method.uioWrite = func;
    } else if (type == BSL_UIO_READ_CB) {
        parameters->method.uioRead = func;
    } else if (type == BSL_UIO_CTRL_CB) {
        parameters->method.uioCtrl = func;
    } else {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    return BSL_SUCCESS;
}
/**
 * Control dispatcher for the SCTP UIO; routes each supported command to its
 * handler. BSL_UIO_SCTP_ADD_AUTH_SHARED_KEY additionally range-checks larg
 * because the handler narrows it to uint16_t. Unknown commands (and an
 * out-of-range add-key larg) fall through to BSL_INVALID_ARG.
 */
int32_t BslSctpCtrl(BSL_UIO *uio, int32_t cmd, int32_t larg, void *parg)
{
    if (uio->ctx == NULL) {
        return BSL_NULL_INPUT;
    }
    SctpParameters *parameters = BSL_UIO_GetCtx(uio);
    switch (cmd) {
        case BSL_UIO_SET_PEER_IP_ADDR:
            return BslSctpSetPeerIpAddr(parameters, parg, larg);
        case BSL_UIO_GET_PEER_IP_ADDR:
            return BslSctpGetPeerIpAddr(parameters, parg, larg);
        case BSL_UIO_SET_FD:
            return BslSctpSetFd(uio, parg, larg);
        case BSL_UIO_GET_FD:
            return BslSctpGetFd(parameters, parg, larg);
        case BSL_UIO_SCTP_GET_SEND_STREAM_ID:
            return BslSctpGetSendStreamId(parameters, parg, larg);
        case BSL_UIO_SCTP_SET_APP_STREAM_ID:
            return BslSctpSetAppStreamId(parameters, parg, larg);
        case BSL_UIO_SCTP_ADD_AUTH_SHARED_KEY:
            // larg is narrowed to uint16_t by the handler; reject out-of-range sizes
            if (larg < 0 || larg > UINT16_MAX) {
                break;
            }
            return BslSctpAddAuthKey(uio, parg, larg);
        case BSL_UIO_SCTP_ACTIVE_AUTH_SHARED_KEY:
            return BslSctpActiveAuthKey(uio);
        case BSL_UIO_SCTP_DEL_PRE_AUTH_SHARED_KEY:
            return BslSctpDelPreAuthKey(uio);
        case BSL_UIO_SCTP_MASK_APP_MESSAGE:
            return BslSctpMaskAppMsg(parameters, parg, larg);
        case BSL_UIO_SCTP_SND_BUFF_IS_EMPTY:
            return BslSctpIsSndBuffEmpty(uio, parg, larg);
        case BSL_UIO_SCTP_SET_CALLBACK:
            return BslSctpSetCtxCb(parameters, larg, parg);
        case BSL_UIO_FLUSH:
            return BSL_SUCCESS; // nothing buffered at this layer
        default:
            break;
    }
    BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
    BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05069, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
        "invalid args", 0, 0, 0, 0);
    return BSL_INVALID_ARG;
}
/**
 * Return the method table for an SCTP-backed UIO.
 * The table is a function-scope static, so the returned pointer is valid for
 * the lifetime of the program and must not be freed by the caller. puts/gets
 * are not supported for SCTP.
 */
const BSL_UIO_Method *BSL_UIO_SctpMethod(void)
{
    static const BSL_UIO_Method method = {
        BSL_UIO_SCTP,
        BslSctpWrite,
        BslSctpRead,
        BslSctpCtrl,
        NULL,
        NULL,
        BslSctpNew,
        BslSctpDestroy
    };
    return &method;
}
#endif /* HITLS_BSL_UIO_SCTP */
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/src/uio_sctp.c | C | unknown | 15,749 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_TCP
#include "bsl_binlog_id.h"
#include "bsl_err_internal.h"
#include "bsl_log_internal.h"
#include "bsl_log.h"
#include "bsl_sal.h"
#include "bsl_errno.h"
#include "sal_net.h"
#include "uio_base.h"
#include "uio_abstraction.h"
/* Context of a TCP UIO: just the socket descriptor (-1 while unattached).
 * NOTE(review): the name is a typo of "TcpParameters"; kept as-is since it is
 * referenced throughout this translation unit. */
typedef struct {
    int32_t fd;  // network socket; -1 until BSL_UIO_SET_FD attaches one
} TcpPrameters;
/**
 * Create hook for the TCP UIO: allocate the context with an invalid fd.
 * Fails if a context is already attached. The UIO stays uninitialized
 * until a socket is attached via BSL_UIO_SET_FD.
 */
static int32_t TcpNew(BSL_UIO *uio)
{
    if (uio->ctx != NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05056, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: ctx is already existed.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    TcpPrameters *param = (TcpPrameters *)BSL_SAL_Calloc(1u, sizeof(TcpPrameters));
    if (param == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05057, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: tcp param malloc fail.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    param->fd = -1;
    uio->ctx = param;
    uio->ctxLen = sizeof(TcpPrameters);
    // Specifies whether to be closed by uio when setting fd.
    // The default value of init is 0. Set the value of init to 1 after the fd is set.
    return BSL_SUCCESS;
}
/**
 * Tear down the TCP UIO: close the socket only if the UIO owns it, free the
 * context and mark the UIO uninitialized. NULL uio is a no-op.
 */
static int32_t TcpSocketDestroy(BSL_UIO *uio)
{
    if (uio == NULL) {
        return BSL_SUCCESS;
    }
    uio->init = 0;
    TcpPrameters *param = BSL_UIO_GetCtx(uio);
    if (param == NULL) {
        return BSL_SUCCESS;
    }
    bool ownsSocket = BSL_UIO_GetIsUnderlyingClosedByUio(uio) && (param->fd != -1);
    if (ownsSocket) {
        (void)BSL_SAL_SockClose(param->fd);
    }
    BSL_SAL_FREE(param);
    BSL_UIO_SetCtx(uio, NULL);
    return BSL_SUCCESS;
}
/**
 * Write up to len bytes to the TCP socket. A positive SAL_Write result sets
 * *writeLen and succeeds. A non-positive result with a non-fatal errno (as
 * classified by UioIsNonFatalErr, e.g. would-block conditions) sets the
 * write/retry flags and still returns BSL_SUCCESS with *writeLen == 0; any
 * other failure is BSL_UIO_IO_EXCEPTION.
 */
static int32_t TcpSocketWrite(BSL_UIO *uio, const void *buf, uint32_t len, uint32_t *writeLen)
{
    *writeLen = 0;
    int32_t err = 0;
    int32_t fd = BSL_UIO_GetFd(uio);
    if (fd < 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        return BSL_UIO_IO_EXCEPTION;
    }
    int32_t ret = SAL_Write(fd, buf, len, &err);
    (void)BSL_UIO_ClearFlags(uio, BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY);
    if (ret > 0) {
        *writeLen = (uint32_t)ret;
        return BSL_SUCCESS;
    }
    // If the value of ret is less than or equal to 0, check errno first.
    if (UioIsNonFatalErr(err)) { // Indicates the errno for determining whether retry is allowed.
        (void)BSL_UIO_SetFlags(uio, BSL_UIO_FLAGS_WRITE | BSL_UIO_FLAGS_SHOULD_RETRY);
        return BSL_SUCCESS;
    }
    BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
    return BSL_UIO_IO_EXCEPTION;
}
/**
 * Read up to len bytes from the TCP socket. A positive SAL_Read result sets
 * *readLen and succeeds. A non-positive result with a non-fatal errno sets
 * the read/retry flags and returns BSL_SUCCESS with *readLen == 0. A clean
 * zero-byte read (peer closed) maps to BSL_UIO_IO_EOF; anything else is
 * BSL_UIO_IO_EXCEPTION.
 */
static int32_t TcpSocketRead(BSL_UIO *uio, void *buf, uint32_t len, uint32_t *readLen)
{
    *readLen = 0;
    int32_t err = 0;
    (void)BSL_UIO_ClearFlags(uio, BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY);
    int32_t fd = BSL_UIO_GetFd(uio);
    if (fd < 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        return BSL_UIO_IO_EXCEPTION;
    }
    int32_t ret = SAL_Read(fd, buf, len, &err);
    if (ret > 0) { // Success
        *readLen = (uint32_t)ret;
        return BSL_SUCCESS;
    }
    // If the value of ret is less than or equal to 0, check errno first.
    if (UioIsNonFatalErr(err)) { // Indicates the errno for determining whether retry is allowed.
        (void)BSL_UIO_SetFlags(uio, BSL_UIO_FLAGS_READ | BSL_UIO_FLAGS_SHOULD_RETRY);
        return BSL_SUCCESS;
    }
    if (ret == 0) {
        // orderly shutdown by the peer
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EOF);
        return BSL_UIO_IO_EOF;
    }
    BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
    return BSL_UIO_IO_EXCEPTION;
}
/* Attach a socket fd to the TCP UIO; closes any previously owned fd. */
static int32_t TcpSetFd(BSL_UIO *uio, int32_t size, const int32_t *fd)
{
    if (fd == NULL || uio == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (size != (int32_t)sizeof(*fd)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    TcpPrameters *tcpCtx = BSL_UIO_GetCtx(uio);
    if (tcpCtx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    /* Replace the old fd: close it first if this UIO owns it. */
    if (tcpCtx->fd != -1 && BSL_UIO_GetIsUnderlyingClosedByUio(uio)) {
        (void)BSL_SAL_SockClose(tcpCtx->fd);
    }
    tcpCtx->fd = *fd;
    uio->init = 1;
    return BSL_SUCCESS;
}
/* Read back the socket fd attached to the TCP UIO. */
static int32_t TcpGetFd(BSL_UIO *uio, int32_t size, int32_t *fd)
{
    if (uio == NULL || fd == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (size != (int32_t)sizeof(*fd)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    TcpPrameters *tcpCtx = BSL_UIO_GetCtx(uio);
    if (tcpCtx == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    *fd = tcpCtx->fd;
    return BSL_SUCCESS;
}
/* Dispatch a control command on the TCP UIO; unknown commands fail. */
static int32_t TcpSocketCtrl(BSL_UIO *uio, int32_t cmd, int32_t larg, void *parg)
{
    int32_t ret;
    switch (cmd) {
        case BSL_UIO_SET_FD:
            ret = TcpSetFd(uio, larg, parg);
            break;
        case BSL_UIO_GET_FD:
            ret = TcpGetFd(uio, larg, parg);
            break;
        case BSL_UIO_FLUSH:
            /* TCP writes are unbuffered here: flushing is a no-op. */
            ret = BSL_SUCCESS;
            break;
        default:
            BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
            ret = BSL_UIO_FAIL;
            break;
    }
    return ret;
}
/* Returns the singleton method table implementing UIO over TCP sockets. */
const BSL_UIO_Method *BSL_UIO_TcpMethod(void)
{
    static const BSL_UIO_Method method = {
        BSL_UIO_TCP,
        TcpSocketWrite,
        TcpSocketRead,
        TcpSocketCtrl,
        NULL, /* optional callback slot unused by the TCP transport */
        NULL, /* optional callback slot unused by the TCP transport */
        TcpNew,
        TcpSocketDestroy
    };
    return &method;
}
#endif /* HITLS_BSL_UIO_TCP */
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/src/uio_tcp.c | C | unknown | 6,013 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_BSL_UIO_UDP
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_binlog_id.h"
#include "bsl_log_internal.h"
#include "bsl_log.h"
#include "bsl_err_internal.h"
#include "bsl_errno.h"
#include "bsl_uio.h"
#include "sal_net.h"
#include "uio_base.h"
#include "uio_abstraction.h"
#define IPV4_WITH_UDP_HEADER_LEN 28 /* IPv4 protocol header 20 + UDP header 8 */
#define IPV6_WITH_UDP_HEADER_LEN 48 /* IPv6 protocol header 40 + UDP header 8 */
/* Per-UIO state for the UDP transport. */
typedef struct {
    BSL_SAL_SockAddr peer; /* cached peer address used by sendto/recvfrom */
    int32_t fd; // Network socket
    uint32_t connected; /* 1: socket is connected, write via SAL_Write; 0: use SAL_Sendto */
    int32_t sysErrno; /* errno from the last failed send; consumed by the MTU-exceeded query */
} UdpParameters;
/* Allocate and attach the UDP context (peer-address holder + fd) to the UIO. */
static int32_t UdpNew(BSL_UIO *uio)
{
    if (uio->ctx != NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05056, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: ctx is already existed.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    UdpParameters *udpCtx = BSL_SAL_Calloc(1u, sizeof(UdpParameters));
    if (udpCtx == NULL) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05073, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: udp param malloc fail.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
        return BSL_UIO_FAIL;
    }
    int32_t ret = SAL_SockAddrNew(&(udpCtx->peer));
    if (ret != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        BSL_SAL_Free(udpCtx);
        return ret;
    }
    udpCtx->fd = -1;
    udpCtx->connected = 0;
    uio->ctx = udpCtx;
    uio->ctxLen = sizeof(UdpParameters);
    /* uio->init stays 0 here; it is raised once a socket fd is attached,
     * which also decides whether the fd is closed by this UIO. */
    return BSL_SUCCESS;
}
/* Tear down the UDP UIO context, optionally closing the owned socket. */
static int32_t UdpSocketDestroy(BSL_UIO *uio)
{
    if (uio == NULL) {
        return BSL_SUCCESS;
    }
    UdpParameters *udpCtx = BSL_UIO_GetCtx(uio);
    if (udpCtx != NULL) {
        if (udpCtx->fd != -1 && BSL_UIO_GetIsUnderlyingClosedByUio(uio)) {
            (void)BSL_SAL_SockClose(udpCtx->fd);
        }
        SAL_SockAddrFree(udpCtx->peer);
        BSL_SAL_Free(udpCtx);
        BSL_UIO_SetCtx(uio, NULL);
    }
    uio->init = false;
    return BSL_SUCCESS;
}
/*
 * Copy the cached peer address into parg (caller buffer of larg bytes).
 * The buffer must be at least SAL_SockAddrSize(peer) bytes.
 * Fix: push BSL_NULL_INPUT onto the error stack on invalid input, making the
 * error reporting consistent with every other failure path in this file
 * (e.g. UdpSetPeerIpAddr).
 */
static int32_t UdpGetPeerIpAddr(UdpParameters *parameters, int32_t larg, uint8_t *parg)
{
    uint32_t uniAddrSize = SAL_SockAddrSize(parameters->peer);
    if (parg == NULL || (uint32_t)larg < uniAddrSize) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05074, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: Get peer ip address input error.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    SAL_SockAddrCopy(parg, parameters->peer);
    return BSL_SUCCESS;
}
/* Store a new peer address; size must not exceed the internal holder's capacity. */
static int32_t UdpSetPeerIpAddr(UdpParameters *parameters, const uint8_t *addr, uint32_t size)
{
    uint32_t peerCap = SAL_SockAddrSize(parameters->peer);
    bool invalid = (addr == NULL) || (peerCap == 0) || (size > peerCap);
    if (invalid) {
        BSL_LOG_BINLOG_FIXLEN(BINLOG_ID05073, BSL_LOG_LEVEL_ERR, BSL_LOG_BINLOG_TYPE_RUN,
            "Uio: NULL error.", 0, 0, 0, 0);
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    SAL_SockAddrCopy(parameters->peer, (BSL_SAL_SockAddr)(uintptr_t)addr);
    return BSL_SUCCESS;
}
/* Attach a socket fd to the UDP UIO; closes any previously owned fd. */
static int32_t UdpSetFd(BSL_UIO *uio, int32_t size, const int32_t *fd)
{
    if (fd == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (size != (int32_t)sizeof(*fd)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    UdpParameters *udpCtx = BSL_UIO_GetCtx(uio); // ctx is not NULL
    /* Replace the old fd: close it first if this UIO owns it. */
    if (udpCtx->fd != -1 && BSL_UIO_GetIsUnderlyingClosedByUio(uio)) {
        (void)BSL_SAL_SockClose(udpCtx->fd);
    }
    udpCtx->fd = *fd;
    uio->init = true;
    return BSL_SUCCESS;
}
/* Report the IP+UDP header overhead (bytes) for the peer's address family. */
static int32_t UdpGetMtuOverhead(UdpParameters *parameters, int32_t size, uint8_t *overhead)
{
    if (overhead == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (size != (int32_t)sizeof(uint8_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    /* IPv4 (20 + 8) is also the fallback for unknown address families. */
    if (SAL_SockAddrGetFamily(parameters->peer) == SAL_IPV6) {
        *overhead = IPV6_WITH_UDP_HEADER_LEN;
    } else {
        *overhead = IPV4_WITH_UDP_HEADER_LEN;
    }
    return BSL_SUCCESS;
}
/*
 * Query the socket's path MTU and report the usable UDP payload size
 * (kernel MTU minus the IP+UDP header overhead) via *mtu.
 * Returns BSL_SUCCESS even if the sockopt query fails; *mtu is 0 then.
 */
static int32_t UdpQueryMtu(UdpParameters *parameters, int32_t size, uint32_t *mtu)
{
    if (mtu == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (size != (int32_t)sizeof(uint32_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    int32_t socketVal = 0;
    int32_t socketOptLen = sizeof(socketVal);
    switch (SAL_SockAddrGetFamily(parameters->peer)) {
        case SAL_IPV4:
            /* Values below the header overhead are rejected to avoid underflow
             * in the subtraction that follows. */
            if (BSL_SAL_GetSockopt(parameters->fd, SAL_PROTO_IP_LEVEL, SAL_MTU_OPTION,
                (void *)&socketVal, &socketOptLen) != BSL_SUCCESS || socketVal < IPV4_WITH_UDP_HEADER_LEN) {
                *mtu = 0;
            } else {
                *mtu = (uint32_t)socketVal - IPV4_WITH_UDP_HEADER_LEN;
            }
            break;
        case SAL_IPV6:
            if (BSL_SAL_GetSockopt(parameters->fd, SAL_PROTO_IPV6_LEVEL, SAL_IPV6_MTU_OPTION,
                (void *)&socketVal, &socketOptLen) != BSL_SUCCESS || socketVal < IPV6_WITH_UDP_HEADER_LEN) {
                *mtu = 0;
            } else {
                *mtu = (uint32_t)socketVal - IPV6_WITH_UDP_HEADER_LEN;
            }
            break;
        default:
            /* Unknown family: the MTU cannot be determined. */
            *mtu = 0;
            break;
    }
    return BSL_SUCCESS;
}
/*
 * Report whether the last send failed because the datagram exceeded the MTU.
 * The flag is one-shot: the recorded errno is cleared once reported.
 */
static int32_t UdpIsMtuExceeded(UdpParameters *parameters, int32_t size, bool *exceeded)
{
    if (exceeded == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (size != (int32_t)sizeof(bool)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    (void)parameters;
    *exceeded = false;
#ifdef EMSGSIZE
    /* EMSGSIZE recorded by UdpSocketWrite means the datagram was too large. */
    if (parameters->sysErrno == EMSGSIZE) {
        *exceeded = true;
        parameters->sysErrno = 0;
    }
#endif
    return BSL_SUCCESS;
}
/* Read back the socket fd attached to the UDP UIO. */
static int32_t UdpGetFd(BSL_UIO *uio, int32_t size, int32_t *fd)
{
    if (fd == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    if (size != (int32_t)sizeof(int32_t)) {
        BSL_ERR_PUSH_ERROR(BSL_INVALID_ARG);
        return BSL_INVALID_ARG;
    }
    UdpParameters *udpCtx = BSL_UIO_GetCtx(uio); // ctx is not NULL
    *fd = udpCtx->fd;
    return BSL_SUCCESS;
}
/* Dispatch a control command on the UDP UIO; unknown commands fail. */
int32_t UdpSocketCtrl(BSL_UIO *uio, int32_t cmd, int32_t larg, void *parg)
{
    UdpParameters *parameters = BSL_UIO_GetCtx(uio);
    if (parameters == NULL) {
        BSL_ERR_PUSH_ERROR(BSL_NULL_INPUT);
        return BSL_NULL_INPUT;
    }
    switch (cmd) {
        case BSL_UIO_SET_FD:
            return UdpSetFd(uio, larg, parg);
        case BSL_UIO_GET_FD:
            return UdpGetFd(uio, larg, parg);
        case BSL_UIO_SET_PEER_IP_ADDR:
            return UdpSetPeerIpAddr(parameters, parg, (uint32_t)larg);
        case BSL_UIO_GET_PEER_IP_ADDR:
            return UdpGetPeerIpAddr(parameters, larg, parg);
        case BSL_UIO_UDP_SET_CONNECTED:
            /* NULL parg switches back to unconnected (sendto) mode. */
            if (parg == NULL) {
                parameters->connected = 0;
                return BSL_SUCCESS;
            }
            parameters->connected = 1;
            return UdpSetPeerIpAddr(parameters, parg, (uint32_t)larg);
        case BSL_UIO_UDP_GET_MTU_OVERHEAD:
            return UdpGetMtuOverhead(parameters, larg, parg);
        case BSL_UIO_UDP_QUERY_MTU:
            return UdpQueryMtu(parameters, larg, parg);
        case BSL_UIO_UDP_MTU_EXCEEDED:
            return UdpIsMtuExceeded(parameters, larg, parg);
        case BSL_UIO_FLUSH:
            /* Datagrams are sent immediately: flushing is a no-op. */
            return BSL_SUCCESS;
        default:
            break;
    }
    BSL_ERR_PUSH_ERROR(BSL_UIO_FAIL);
    return BSL_UIO_FAIL;
}
/*
 * Send a datagram on the UDP socket (connected mode uses SAL_Write,
 * otherwise SAL_Sendto with the cached peer address).
 * On success *writeLen holds the byte count; on a non-fatal errno the retry
 * flags are raised and BSL_SUCCESS is returned with *writeLen == 0.
 * Fix: initialize *writeLen at entry — previously the non-fatal-retry path
 * returned BSL_SUCCESS without ever setting it, leaving the caller with an
 * indeterminate value (TcpSocketWrite and UdpSocketRead both zero theirs).
 */
static int32_t UdpSocketWrite(BSL_UIO *uio, const void *buf, uint32_t len, uint32_t *writeLen)
{
    *writeLen = 0;
    int32_t err = 0;
    int32_t sendBytes = 0;
    int32_t fd = BSL_UIO_GetFd(uio);
    UdpParameters *ctx = (UdpParameters *)BSL_UIO_GetCtx(uio);
    if (ctx == NULL || fd < 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        return BSL_UIO_IO_EXCEPTION;
    }
    ctx->sysErrno = 0;
    uint32_t peerAddrSize = SAL_SockAddrSize(ctx->peer);
    if (ctx->connected == 1) {
        sendBytes = SAL_Write(fd, buf, len, &err);
    } else {
        sendBytes = SAL_Sendto(fd, buf, len, 0, ctx->peer, peerAddrSize, &err);
    }
    (void)BSL_UIO_ClearFlags(uio, BSL_UIO_FLAGS_RWS | BSL_UIO_FLAGS_SHOULD_RETRY);
    if (sendBytes < 0) {
        /* None-fatal error: record errno (e.g. EMSGSIZE for the MTU query). */
        if (UioIsNonFatalErr(err)) {
            (void)BSL_UIO_SetFlags(uio, BSL_UIO_FLAGS_WRITE | BSL_UIO_FLAGS_SHOULD_RETRY);
            ctx->sysErrno = err;
            return BSL_SUCCESS;
        }
        /* Fatal error */
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        return BSL_UIO_IO_EXCEPTION;
    }
    *writeLen = (uint32_t)sendBytes;
    return BSL_SUCCESS;
}
/*
 * Receive a datagram from the UDP socket; the sender's address is written
 * into the cached peer holder. Non-fatal errno raises the retry flags and
 * returns BSL_SUCCESS with *readLen == 0.
 */
static int32_t UdpSocketRead(BSL_UIO *uio, void *buf, uint32_t len, uint32_t *readLen)
{
    *readLen = 0;
    int32_t sysErr = 0;
    int32_t sock = BSL_UIO_GetFd(uio);
    UdpParameters *udpCtx = BSL_UIO_GetCtx(uio);
    if (udpCtx == NULL || sock < 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        return BSL_UIO_IO_EXCEPTION;
    }
    int32_t addrLen = (int32_t)SAL_SockAddrSize(udpCtx->peer);
    int32_t received = SAL_RecvFrom(sock, buf, len, 0, udpCtx->peer, &addrLen, &sysErr);
    if (received > 0) {
        *readLen = (uint32_t)received;
        return BSL_SUCCESS;
    }
    if (received == 0) {
        BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
        return BSL_UIO_IO_EXCEPTION;
    }
    /* received < 0: retryable errno raises the flags, otherwise fatal. */
    if (UioIsNonFatalErr(sysErr)) {
        (void)BSL_UIO_SetFlags(uio, BSL_UIO_FLAGS_READ | BSL_UIO_FLAGS_SHOULD_RETRY);
        return BSL_SUCCESS;
    }
    BSL_ERR_PUSH_ERROR(BSL_UIO_IO_EXCEPTION);
    return BSL_UIO_IO_EXCEPTION;
}
/* Returns the singleton method table implementing UIO over UDP sockets. */
const BSL_UIO_Method *BSL_UIO_UdpMethod(void)
{
    static const BSL_UIO_Method method = {
        BSL_UIO_UDP,
        UdpSocketWrite,
        UdpSocketRead,
        UdpSocketCtrl,
        NULL, /* optional callback slot unused by the UDP transport */
        NULL, /* optional callback slot unused by the UDP transport */
        UdpNew,
        UdpSocketDestroy
    };
    return &method;
}
#endif /* HITLS_BSL_UIO_UDP */
| 2302_82127028/openHiTLS-examples_1508 | bsl/uio/src/uio_udp.c | C | unknown | 11,217 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef DECODE_LOCAL_H
#define DECODE_LOCAL_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CODECS
#include "crypt_eal_implprovider.h"
#include "crypt_eal_codecs.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* Decoder states used by the pool while searching for a decode chain. */
#define CRYPT_DECODER_STATE_UNTRIED 1
#define CRYPT_DECODER_STATE_TRING 2 /* currently being tried (name kept as-is for compatibility) */
#define CRYPT_DECODER_STATE_TRIED 3 /* tried and failed for the current node */
#define CRYPT_DECODER_STATE_SUCCESS 4
#define MAX_CRYPT_DECODER_FORMAT_TYPE_STR_LEN 64
/**
 * @brief Decoder method table: function pointers supplied by a provider.
 */
typedef struct CRYPT_DECODER_Method {
    CRYPT_DECODER_IMPL_NewCtx newCtx; /* New context function */
    CRYPT_DECODER_IMPL_SetParam setParam; /* Set parameter function */
    CRYPT_DECODER_IMPL_GetParam getParam; /* Get parameter function */
    CRYPT_DECODER_IMPL_Decode decode; /* Decode function */
    CRYPT_DECODER_IMPL_FreeOutData freeOutData; /* Free output data function */
    CRYPT_DECODER_IMPL_FreeCtx freeCtx; /* Free context function */
} CRYPT_DECODER_Method;
/* Decoder context: one provider-backed decoder plus its declared in/out formats.
 * The inFormat/inType/outFormat/outType pointers alias the attrName buffer. */
struct CRYPT_DecoderCtx {
    /* To get the provider manager context when query */
    CRYPT_EAL_ProvMgrCtx *providerMgrCtx; /* Provider manager context */
    char *attrName; /* Attribute name (owned; freed by CRYPT_DECODE_Free) */
    const char *inFormat; /* Input data format */
    const char *inType; /* Input data type */
    const char *outFormat; /* Output data format */
    const char *outType; /* Output data type */
    void *decoderCtx; /* Decoder internal context */
    CRYPT_DECODER_Method *method; /* Decoder method */
    int32_t decoderState; /* Decoder state, one of CRYPT_DECODER_STATE_* */
};
/* A (format, type, data) triple describing one stage's input or output. */
typedef struct {
    const char *format; /* Data format */
    const char *type; /* Data type */
    BSL_Param *data; /* Data */
} DataInfo;
/* One step in the decode chain: transforms inData into outData via decoderCtx. */
typedef struct CRYPT_DECODER_Node {
    DataInfo inData; /* Input data */
    DataInfo outData; /* Output data */
    CRYPT_DECODER_Ctx *decoderCtx; /* Decoder context */
} CRYPT_DECODER_Node;
#define MAX_CRYPT_DECODE_FORMAT_TYPE_SIZE 128
/* Pool of candidate decoders plus the current (partial) decode path. */
struct CRYPT_DECODER_PoolCtx {
    CRYPT_EAL_LibCtx *libCtx; /* EAL library context */
    const char *attrName; /* Attribute name */
    const char *inputFormat; /* Input data format */
    const char *inputType; /* Input data type */
    int32_t inputKeyType; /* Input data key type */
    BSL_Param *input; /* Input data */
    const char *targetFormat; /* Target format */
    const char *targetType; /* Target type */
    int32_t targetKeyType; /* Target data key type */
    BslList *decoders; /* The decoders pool of all provider, the entry is CRYPT_DECODER_Ctx */
    BslList *decoderPath; /* The path of the decoder, the entry is CRYPT_DECODER_Node */
};
/* Parsed view of a decoder attribute string; all const pointers alias attrName. */
typedef struct {
    char *attrName; /* owned copy of the attribute string; tokens point inside it */
    const char *inFormat;
    const char *inType;
    const char *outFormat;
    const char *outType;
} DECODER_AttrInfo;
int32_t CRYPT_DECODE_ParseDecoderAttr(const char *attrName, DECODER_AttrInfo *info);
CRYPT_DECODER_Ctx *CRYPT_DECODE_NewDecoderCtxByMethod(const CRYPT_EAL_Func *funcs, CRYPT_EAL_ProvMgrCtx *mgrCtx,
const char *attrName);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* HITLS_CRYPTO_CODECS */
#endif /* DECODE_LOCAL_H */ | 2302_82127028/openHiTLS-examples_1508 | codecs/include/decode_local.h | C | unknown | 4,097 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_CODECS) && defined(HITLS_CRYPTO_PROVIDER)
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_list.h"
#include "bsl_err_internal.h"
#include "crypt_errno.h"
#include "crypt_eal_provider.h"
#include "crypt_eal_implprovider.h"
#include "crypt_provider.h"
#include "crypt_eal_pkey.h"
#include "crypt_eal_codecs.h"
#include "bsl_types.h"
#include "crypt_types.h"
#include "crypt_utils.h"
#include "decode_local.h"
/*
 * Parse a comma-separated attribute string ("inFormat=..,inType=..,...")
 * into info. info->attrName receives an owned copy of attrName; the four
 * const pointers point into that copy. Caller frees via CRYPT_DECODE_Free
 * (or BSL_SAL_Free on info->attrName).
 */
int32_t CRYPT_DECODE_ParseDecoderAttr(const char *attrName, DECODER_AttrInfo *info)
{
    info->inFormat = NULL;
    info->inType = NULL;
    info->outFormat = NULL;
    info->outType = NULL;
    info->attrName = (char *)BSL_SAL_Dump(attrName, (uint32_t)strlen(attrName) + 1);
    if (info->attrName == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    char *savePtr = NULL;
    for (char *item = strtok_s(info->attrName, ",", &savePtr); item != NULL;
         item = strtok_s(NULL, ",", &savePtr)) {
        /* Skip leading spaces inside each token. */
        while (*item == ' ') {
            item++;
        }
        if (strstr(item, "inFormat=") == item) {
            info->inFormat = item + strlen("inFormat=");
        } else if (strstr(item, "inType=") == item) {
            info->inType = item + strlen("inType=");
        } else if (strstr(item, "outFormat=") == item) {
            info->outFormat = item + strlen("outFormat=");
        } else if (strstr(item, "outType=") == item) {
            info->outType = item + strlen("outType=");
        }
    }
    return CRYPT_SUCCESS;
}
/*
 * Build the decoder method table from a provider's function array and
 * attach it to ctx. An unknown function id frees the table and fails.
 */
static int32_t SetDecoderMethod(CRYPT_DECODER_Ctx *ctx, const CRYPT_EAL_Func *funcs)
{
    CRYPT_DECODER_Method *method = BSL_SAL_Calloc(1, sizeof(CRYPT_DECODER_Method));
    if (method == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    for (int32_t i = 0; funcs[i].func != NULL; i++) {
        switch (funcs[i].id) {
            case CRYPT_DECODER_IMPL_NEWCTX:
                method->newCtx = (CRYPT_DECODER_IMPL_NewCtx)funcs[i].func;
                break;
            case CRYPT_DECODER_IMPL_SETPARAM:
                method->setParam = (CRYPT_DECODER_IMPL_SetParam)funcs[i].func;
                break;
            case CRYPT_DECODER_IMPL_GETPARAM:
                method->getParam = (CRYPT_DECODER_IMPL_GetParam)funcs[i].func;
                break;
            case CRYPT_DECODER_IMPL_DECODE:
                method->decode = (CRYPT_DECODER_IMPL_Decode)funcs[i].func;
                break;
            case CRYPT_DECODER_IMPL_FREEOUTDATA:
                method->freeOutData = (CRYPT_DECODER_IMPL_FreeOutData)funcs[i].func;
                break;
            case CRYPT_DECODER_IMPL_FREECTX:
                method->freeCtx = (CRYPT_DECODER_IMPL_FreeCtx)funcs[i].func;
                break;
            default:
                BSL_SAL_Free(method);
                BSL_ERR_PUSH_ERROR(CRYPT_PROVIDER_ERR_UNEXPECTED_IMPL);
                return CRYPT_PROVIDER_ERR_UNEXPECTED_IMPL;
        }
    }
    ctx->method = method;
    return CRYPT_SUCCESS;
}
/*
 * Create a decoder context from a provider's function array.
 * Binds the provider's user ctx, sets the provider-ctx param, and parses
 * attrName (if any) into the format/type fields. On any failure everything
 * allocated so far is released via CRYPT_DECODE_Free and NULL is returned.
 */
CRYPT_DECODER_Ctx *CRYPT_DECODE_NewDecoderCtxByMethod(const CRYPT_EAL_Func *funcs, CRYPT_EAL_ProvMgrCtx *mgrCtx,
    const char *attrName)
{
    void *provCtx = NULL;
    DECODER_AttrInfo attrInfo = {0};
    CRYPT_DECODER_Ctx *ctx = BSL_SAL_Calloc(1, sizeof(CRYPT_DECODER_Ctx));
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return NULL;
    }
    int32_t ret = CRYPT_EAL_ProviderCtrl(mgrCtx, CRYPT_PROVIDER_GET_USER_CTX, &provCtx, sizeof(provCtx));
    if (ret != CRYPT_SUCCESS) {
        goto ERR;
    }
    ret = SetDecoderMethod(ctx, funcs);
    if (ret != CRYPT_SUCCESS) {
        goto ERR;
    }
    /* newCtx and setParam are mandatory; the rest of the table is optional. */
    if (ctx->method->newCtx == NULL || ctx->method->setParam == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NOT_SUPPORT);
        goto ERR;
    }
    ctx->decoderCtx = ctx->method->newCtx(provCtx);
    if (ctx->decoderCtx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        goto ERR;
    }
    BSL_Param param[2] = {{CRYPT_PARAM_DECODE_PROVIDER_CTX, BSL_PARAM_TYPE_CTX_PTR, mgrCtx, 0, 0},
        BSL_PARAM_END};
    ret = ctx->method->setParam(ctx->decoderCtx, param);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
    if (attrName != NULL) {
        /* attrInfo's const pointers alias attrInfo.attrName, which is handed
         * over to ctx below and freed by CRYPT_DECODE_Free. */
        ret = CRYPT_DECODE_ParseDecoderAttr(attrName, &attrInfo);
        if (ret != CRYPT_SUCCESS) {
            goto ERR;
        }
    }
    ctx->providerMgrCtx = mgrCtx;
    ctx->inFormat = attrInfo.inFormat;
    ctx->inType = attrInfo.inType;
    ctx->outFormat = attrInfo.outFormat;
    ctx->outType = attrInfo.outType;
    ctx->attrName = attrName != NULL ? attrInfo.attrName : NULL;
    ctx->decoderState = CRYPT_DECODER_STATE_UNTRIED;
    return ctx;
ERR:
    CRYPT_DECODE_Free(ctx);
    return NULL;
}
/* Look up a provider decoder implementation for keyType and build a context. */
CRYPT_DECODER_Ctx *CRYPT_DECODE_ProviderNewCtx(CRYPT_EAL_LibCtx *libCtx, int32_t keyType, const char *attrName)
{
    const CRYPT_EAL_Func *decoderFuncs = NULL;
    CRYPT_EAL_ProvMgrCtx *provMgr = NULL;
    int32_t ret = CRYPT_EAL_ProviderGetFuncsAndMgrCtx(libCtx, CRYPT_EAL_OPERAID_DECODER, keyType, attrName,
        &decoderFuncs, &provMgr);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return NULL;
    }
    if (provMgr == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return NULL;
    }
    return CRYPT_DECODE_NewDecoderCtxByMethod(decoderFuncs, provMgr, attrName);
}
/* Free decoder context, including its provider-side context, method table
 * and owned attribute-name buffer. NULL is accepted. */
void CRYPT_DECODE_Free(CRYPT_DECODER_Ctx *ctx)
{
    if (ctx == NULL) {
        return;
    }
    CRYPT_DECODER_Method *method = ctx->method;
    if (method != NULL && method->freeCtx != NULL) {
        method->freeCtx(ctx->decoderCtx);
    }
    BSL_SAL_Free(method);
    BSL_SAL_Free(ctx->attrName);
    BSL_SAL_Free(ctx);
}
/* Set decoder parameters by forwarding to the provider implementation. */
int32_t CRYPT_DECODE_SetParam(CRYPT_DECODER_Ctx *ctx, const BSL_Param *param)
{
    if (ctx == NULL || param == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    const CRYPT_DECODER_Method *method = ctx->method;
    if (method == NULL || method->setParam == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NOT_SUPPORT);
        return CRYPT_NOT_SUPPORT;
    }
    return method->setParam(ctx->decoderCtx, param);
}
/* Get decoder parameters by forwarding to the provider implementation. */
int32_t CRYPT_DECODE_GetParam(CRYPT_DECODER_Ctx *ctx, BSL_Param *param)
{
    if (ctx == NULL || param == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    const CRYPT_DECODER_Method *method = ctx->method;
    if (method == NULL || method->getParam == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NOT_SUPPORT);
        return CRYPT_NOT_SUPPORT;
    }
    return method->getParam(ctx->decoderCtx, param);
}
/* Execute the decode operation; *outParam receives provider-owned output
 * that must be released with CRYPT_DECODE_FreeOutData. */
int32_t CRYPT_DECODE_Decode(CRYPT_DECODER_Ctx *ctx, const BSL_Param *inParam, BSL_Param **outParam)
{
    bool badArg = (ctx == NULL) || (inParam == NULL) || (outParam == NULL);
    if (badArg) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (ctx->method == NULL || ctx->method->decode == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NOT_SUPPORT);
        return CRYPT_NOT_SUPPORT;
    }
    int32_t ret = ctx->method->decode(ctx->decoderCtx, inParam, outParam);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
    }
    return ret;
}
/* Release output data previously produced by CRYPT_DECODE_Decode. */
void CRYPT_DECODE_FreeOutData(CRYPT_DECODER_Ctx *ctx, BSL_Param *outData)
{
    if (ctx == NULL || outData == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return;
    }
    const CRYPT_DECODER_Method *method = ctx->method;
    if (method == NULL || method->freeOutData == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NOT_SUPPORT);
        return;
    }
    method->freeOutData(ctx->decoderCtx, outData);
}
#endif /* HITLS_CRYPTO_CODECS && HITLS_CRYPTO_PROVIDER */
| 2302_82127028/openHiTLS-examples_1508 | codecs/src/decode.c | C | unknown | 8,345 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_CODECS) && defined(HITLS_CRYPTO_PROVIDER)
#include <stdint.h>
#include <string.h>
#include "securec.h"
#include "crypt_eal_codecs.h"
#include "crypt_eal_implprovider.h"
#include "crypt_provider.h"
#include "crypt_params_key.h"
#include "crypt_types.h"
#include "crypt_errno.h"
#include "decode_local.h"
#include "bsl_list.h"
#include "bsl_errno.h"
#include "bsl_err_internal.h"
/* Allocate a path node; all strings and the input params are borrowed. */
static CRYPT_DECODER_Node *CreateDecoderNode(const char *format, const char *type, const char *targetFormat,
    const char *targetType, const BSL_Param *input)
{
    CRYPT_DECODER_Node *node = BSL_SAL_Calloc(1, sizeof(CRYPT_DECODER_Node));
    if (node == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return NULL;
    }
    node->inData.format = format;
    node->inData.type = type;
    node->inData.data = (BSL_Param *)(uintptr_t)input;
    node->outData.format = targetFormat;
    node->outData.type = targetType;
    return node;
}
/* Free a path node and the output data produced by its decoder. */
static void FreeDecoderNode(CRYPT_DECODER_Node *node)
{
    if (node == NULL) {
        return;
    }
    CRYPT_DECODE_FreeOutData(node->decoderCtx, node->outData.data);
    BSL_SAL_Free(node);
}
/*
 * Create a decoder pool context. Targets are unset until configured via
 * CRYPT_DECODE_PoolCtrl. attrName/format/type are borrowed from the caller.
 */
CRYPT_DECODER_PoolCtx *CRYPT_DECODE_PoolNewCtx(CRYPT_EAL_LibCtx *libCtx, const char *attrName,
    int32_t keyType, const char *format, const char *type)
{
    CRYPT_DECODER_PoolCtx *pool = BSL_SAL_Calloc(1, sizeof(CRYPT_DECODER_PoolCtx));
    if (pool == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return NULL;
    }
    pool->decoders = BSL_LIST_New(sizeof(CRYPT_DECODER_Ctx));
    if (pool->decoders == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        BSL_SAL_Free(pool);
        return NULL;
    }
    pool->decoderPath = BSL_LIST_New(sizeof(CRYPT_DECODER_Node));
    if (pool->decoderPath == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        BSL_LIST_FREE(pool->decoders, NULL);
        BSL_SAL_Free(pool);
        return NULL;
    }
    pool->libCtx = libCtx;
    pool->attrName = attrName;
    pool->inputFormat = format;
    pool->inputType = type;
    pool->inputKeyType = keyType;
    pool->targetFormat = NULL;
    pool->targetType = NULL;
    return pool;
}
/* Destroy the pool: the path (nodes + their output data) first, then the
 * decoder contexts, then the pool itself. NULL is accepted. */
void CRYPT_DECODE_PoolFreeCtx(CRYPT_DECODER_PoolCtx *poolCtx)
{
    if (poolCtx == NULL) {
        return;
    }
    if (poolCtx->decoderPath != NULL) {
        BSL_LIST_FREE(poolCtx->decoderPath, (BSL_LIST_PFUNC_FREE)FreeDecoderNode);
    }
    if (poolCtx->decoders != NULL) {
        BSL_LIST_FREE(poolCtx->decoders, (BSL_LIST_PFUNC_FREE)CRYPT_DECODE_Free);
    }
    BSL_SAL_Free(poolCtx);
}
/* Store a target format/type string pointer after a basic length sanity check. */
static int32_t SetDecodeType(void *val, int32_t valLen, const char **targetValue)
{
    if (valLen == 0 || valLen > MAX_CRYPT_DECODE_FORMAT_TYPE_SIZE) {
        BSL_ERR_PUSH_ERROR(CRYPT_INVALID_ARG);
        return CRYPT_INVALID_ARG;
    }
    *targetValue = val; /* borrowed, not copied */
    return CRYPT_SUCCESS;
}
/*
 * Control whether the previous path node's output data is freed with the
 * node. Passing false detaches the data (caller takes ownership).
 */
static int32_t SetFlagFreeOutData(CRYPT_DECODER_PoolCtx *poolCtx, void *val, int32_t valLen)
{
    if (valLen != sizeof(bool)) {
        BSL_ERR_PUSH_ERROR(CRYPT_INVALID_ARG);
        return CRYPT_INVALID_ARG;
    }
    if (poolCtx->decoderPath == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_INVALID_ARG);
        return CRYPT_INVALID_ARG;
    }
    CRYPT_DECODER_Node *lastNode = BSL_LIST_GET_PREV(poolCtx->decoderPath);
    if (lastNode != NULL && !(*(bool *)val)) {
        lastNode->outData.data = NULL;
    }
    return CRYPT_SUCCESS;
}
/* Dispatch a pool control command; unknown commands are invalid arguments. */
int32_t CRYPT_DECODE_PoolCtrl(CRYPT_DECODER_PoolCtx *poolCtx, int32_t cmd, void *val, int32_t valLen)
{
    if (poolCtx == NULL || val == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (cmd == CRYPT_DECODE_POOL_CMD_SET_TARGET_TYPE) {
        return SetDecodeType(val, valLen, &poolCtx->targetType);
    }
    if (cmd == CRYPT_DECODE_POOL_CMD_SET_TARGET_FORMAT) {
        return SetDecodeType(val, valLen, &poolCtx->targetFormat);
    }
    if (cmd == CRYPT_DECODE_POOL_CMD_SET_FLAG_FREE_OUT_DATA) {
        return SetFlagFreeOutData(poolCtx, val, valLen);
    }
    BSL_ERR_PUSH_ERROR(CRYPT_INVALID_ARG);
    return CRYPT_INVALID_ARG;
}
/*
 * Callback: configure a freshly-created decoder with the pool's library
 * context and attribute name, then append it to the pool's decoder list.
 */
static int32_t CollectDecoder(CRYPT_DECODER_Ctx *decoderCtx, void *args)
{
    CRYPT_DECODER_PoolCtx *pool = (CRYPT_DECODER_PoolCtx *)args;
    if (pool == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    // TODO: Filter the decoder by input format and type According to poolCtx
    BSL_Param param[3] = {
        {CRYPT_PARAM_DECODE_LIB_CTX, BSL_PARAM_TYPE_CTX_PTR, pool->libCtx, 0, 0},
        {CRYPT_PARAM_DECODE_TARGET_ATTR_NAME, BSL_PARAM_TYPE_OCTETS_PTR, (void *)(uintptr_t)pool->attrName, 0, 0},
        BSL_PARAM_END
    };
    int32_t ret = CRYPT_DECODE_SetParam(decoderCtx, param);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    ret = BSL_LIST_AddElement(pool->decoders, decoderCtx, BSL_LIST_POS_END);
    if (ret != BSL_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    return CRYPT_SUCCESS;
}
/*
 * Find the first untried decoder compatible with the node's input
 * format/type and mark it "trying". A NULL wanted format/type acts as a
 * wildcard; a decoder with a NULL inType matches any type. Returns NULL
 * when no untried decoder matches.
 */
static CRYPT_DECODER_Ctx* GetUsableDecoderFromPool(CRYPT_DECODER_PoolCtx *poolCtx, CRYPT_DECODER_Node *currNode)
{
    const char *wantFormat = currNode->inData.format;
    const char *wantType = currNode->inData.type;
    CRYPT_DECODER_Ctx *candidate = BSL_LIST_GET_FIRST(poolCtx->decoders);
    while (candidate != NULL) {
        if (candidate->decoderState == CRYPT_DECODER_STATE_UNTRIED) {
            bool formatOk = (wantFormat == NULL) ||
                (candidate->inFormat != NULL && BSL_SAL_StrcaseCmp(candidate->inFormat, wantFormat) == 0);
            bool typeOk = (wantType == NULL) || (candidate->inType == NULL) ||
                (BSL_SAL_StrcaseCmp(candidate->inType, wantType) == 0);
            if (formatOk && typeOk) {
                candidate->decoderState = CRYPT_DECODER_STATE_TRING;
                return candidate;
            }
        }
        candidate = BSL_LIST_GET_NEXT(poolCtx->decoders);
    }
    return NULL;
}
/* Append a new path node whose input is the current node's output and whose
 * target is the pool's configured target format/type. */
static int32_t UpdateDecoderPath(CRYPT_DECODER_PoolCtx *poolCtx, CRYPT_DECODER_Node *currNode)
{
    CRYPT_DECODER_Node *nextNode = CreateDecoderNode(currNode->outData.format, currNode->outData.type,
        poolCtx->targetFormat, poolCtx->targetType, currNode->outData.data);
    if (nextNode == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    int32_t ret = BSL_LIST_AddElement(poolCtx->decoderPath, nextNode, BSL_LIST_POS_END);
    if (ret != BSL_SUCCESS) {
        BSL_SAL_FREE(nextNode);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    return CRYPT_SUCCESS;
}
/*
 * Run the node's decoder on its input. On success, record the output and
 * its format/type, mark the decoder successful and extend the path.
 * On decode failure the decoder is marked "tried" and CRYPT_DECODE_RETRY
 * tells the caller to try the next candidate.
 */
static int32_t TryDecodeWithDecoder(CRYPT_DECODER_PoolCtx *poolCtx, CRYPT_DECODER_Node *currNode)
{
    BSL_Param *decoded = NULL;
    int32_t ret = CRYPT_DECODE_Decode(currNode->decoderCtx, currNode->inData.data, &decoded);
    if (ret != CRYPT_SUCCESS) {
        currNode->decoderCtx->decoderState = CRYPT_DECODER_STATE_TRIED;
        return CRYPT_DECODE_RETRY;
    }
    /* Ask the decoder what format/type it just produced. */
    BSL_Param outParam[3] = {
        {CRYPT_PARAM_DECODE_OUTPUT_FORMAT, BSL_PARAM_TYPE_OCTETS_PTR, NULL, 0, 0},
        {CRYPT_PARAM_DECODE_OUTPUT_TYPE, BSL_PARAM_TYPE_OCTETS_PTR, NULL, 0, 0},
        BSL_PARAM_END
    };
    ret = CRYPT_DECODE_GetParam(currNode->decoderCtx, outParam);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    currNode->outData.data = decoded;
    currNode->outData.format = outParam[0].value;
    currNode->outData.type = outParam[1].value;
    currNode->decoderCtx->decoderState = CRYPT_DECODER_STATE_SUCCESS;
    ret = UpdateDecoderPath(poolCtx, currNode);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    return CRYPT_SUCCESS;
}
/*
 * Backtrack: drop the last path node and reset its predecessor to an
 * undecoded state so a different decoder can be tried on it.
 * NOTE(review): this relies on BSL list cursor semantics — GET_PREV/NEXT/
 * FIRST/LAST move the list's internal cursor and DeleteCurrent removes the
 * element the cursor points at; confirm against bsl_list.h before changing
 * the call order.
 */
static void ResetLastNode(CRYPT_DECODER_PoolCtx *poolCtx, CRYPT_DECODER_Node *currNode)
{
    (void)currNode;
    CRYPT_DECODER_Node *prevNode = BSL_LIST_GET_PREV(poolCtx->decoderPath);
    /* Reset the out data of previous node if found */
    if (prevNode != NULL) {
        CRYPT_DECODE_FreeOutData(prevNode->decoderCtx, prevNode->outData.data);
        prevNode->outData.data = NULL;
        prevNode->decoderCtx = NULL;
        /* The predecessor becomes the new tail: retarget it at the pool goal. */
        prevNode->outData.format = poolCtx->targetFormat;
        prevNode->outData.type = poolCtx->targetType;
        (void)BSL_LIST_GET_NEXT(poolCtx->decoderPath);
    } else {
        (void)BSL_LIST_GET_FIRST(poolCtx->decoderPath);
    }
    BSL_LIST_DeleteCurrent(poolCtx->decoderPath, (BSL_LIST_PFUNC_FREE)FreeDecoderNode);
    /* Leave the cursor on the new last node for the caller's next iteration. */
    (void)BSL_LIST_GET_LAST(poolCtx->decoderPath);
}
/*
 * Backtrack one layer in the decode path and make every decoder that was
 * marked "tried" eligible again for the (now different) previous node.
 */
static int32_t BackToLastLayerDecodeNode(CRYPT_DECODER_PoolCtx *poolCtx, CRYPT_DECODER_Node *currNode)
{
    if (poolCtx == NULL || currNode == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    ResetLastNode(poolCtx, currNode);
    for (CRYPT_DECODER_Ctx *dec = BSL_LIST_GET_FIRST(poolCtx->decoders); dec != NULL;
         dec = BSL_LIST_GET_NEXT(poolCtx->decoders)) {
        if (dec->decoderState == CRYPT_DECODER_STATE_TRIED) {
            dec->decoderState = CRYPT_DECODER_STATE_UNTRIED;
        }
    }
    return CRYPT_SUCCESS;
}
/* Case-insensitive string equality; two NULLs match, NULL vs non-NULL do not. */
static bool IsStrMatch(const char *source, const char *target)
{
    if (source == NULL || target == NULL) {
        return source == target;
    }
    return BSL_SAL_StrcaseCmp(source, target) == 0;
}
/*
 * Depth-first search over the decoder pool: repeatedly pick a usable decoder
 * for the current path node and decode, backtracking one layer when no
 * decoder applies, until a node's input matches the pool's target
 * format/type or the path is exhausted.
 *
 * On success *outParam points at the matching node's input data (owned by
 * the path node, not the caller). Returns
 * CRYPT_DECODE_ERR_NO_USABLE_DECODER when the path empties without a match.
 */
static int32_t DecodeWithKeyChain(CRYPT_DECODER_PoolCtx *poolCtx, BSL_Param **outParam)
{
    int32_t ret;
    CRYPT_DECODER_Ctx *decoderCtx = NULL;
    CRYPT_DECODER_Node *currNode = BSL_LIST_GET_FIRST(poolCtx->decoderPath);
    while (!BSL_LIST_EMPTY(poolCtx->decoderPath)) {
        /* Target reached: hand the node's input data to the caller. */
        if (IsStrMatch(currNode->inData.format, poolCtx->targetFormat) &&
            IsStrMatch(currNode->inData.type, poolCtx->targetType)) {
            *outParam = currNode->inData.data;
            return CRYPT_SUCCESS;
        }
        /* Get the usable decoder from the pool */
        decoderCtx = GetUsableDecoderFromPool(poolCtx, currNode);
        /* If the decoder is found, try to decode */
        if (decoderCtx != NULL) {
            currNode->decoderCtx = decoderCtx;
            ret = TryDecodeWithDecoder(poolCtx, currNode);
            if (ret == CRYPT_DECODE_RETRY) {
                /* This decoder did not apply; try the next one on the same node. */
                continue;
            }
        } else {
            /* No decoder left for this node: backtrack one layer. */
            ret = BackToLastLayerDecodeNode(poolCtx, currNode);
        }
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        /* Re-read the list's current node — decoding/backtracking moved the cursor. */
        CRYPT_DECODER_Node **curNodePtr = (CRYPT_DECODER_Node **)BSL_LIST_Curr(poolCtx->decoderPath);
        currNode = curNodePtr == NULL ? NULL : *curNodePtr;
    }
    BSL_ERR_PUSH_ERROR(CRYPT_DECODE_ERR_NO_USABLE_DECODER);
    return CRYPT_DECODE_ERR_NO_USABLE_DECODER;
}
/* Callback invoked once for each decoder context discovered in a provider. */
typedef int32_t (*CRYPT_DECODE_ProviderProcessCb)(CRYPT_DECODER_Ctx *decoderCtx, void *args);

/* Bundles the user callback with its opaque argument so both can be passed
 * through the single void* slot of CRYPT_EAL_ProviderProcessAll. */
typedef struct {
    CRYPT_DECODE_ProviderProcessCb cb;   /* callback to run per decoder */
    void *args;                          /* opaque user data forwarded to cb */
} CRYPT_DECODE_ProviderProcessArgs;
/*
 * Per-provider hook: queries one provider for its decoder algorithms and
 * feeds a freshly created decoder context for each of them to the user
 * callback carried in @args (a CRYPT_DECODE_ProviderProcessArgs).
 *
 * A provider that does not support the decoder operation is skipped
 * (CRYPT_NOT_SUPPORT is mapped to CRYPT_SUCCESS). On callback failure the
 * context just created is released here; on callback success ownership is
 * presumably transferred to the callback — TODO confirm with CollectDecoder.
 */
static int32_t ProcessEachProviderDecoder(CRYPT_EAL_ProvMgrCtx *ctx, void *args)
{
    if (ctx == NULL || args == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    CRYPT_DECODE_ProviderProcessArgs *procArgs = (CRYPT_DECODE_ProviderProcessArgs *)args;
    CRYPT_EAL_AlgInfo *algInfos = NULL;
    int32_t ret = CRYPT_EAL_ProviderQuery(ctx, CRYPT_EAL_OPERAID_DECODER, &algInfos);
    if (ret == CRYPT_NOT_SUPPORT) {
        return CRYPT_SUCCESS;
    }
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    if (algInfos == NULL) {
        return CRYPT_SUCCESS;
    }
    /* The algorithm table is terminated by an entry with algId == 0. */
    for (int32_t i = 0; algInfos[i].algId != 0; i++) {
        CRYPT_DECODER_Ctx *decoderCtx =
            CRYPT_DECODE_NewDecoderCtxByMethod(algInfos[i].implFunc, ctx, algInfos[i].attr);
        if (decoderCtx == NULL) {
            BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
            return CRYPT_MEM_ALLOC_FAIL;
        }
        ret = procArgs->cb(decoderCtx, procArgs->args);
        if (ret != CRYPT_SUCCESS) {
            CRYPT_DECODE_Free(decoderCtx);
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
    }
    return CRYPT_SUCCESS;
}
/*
 * Iterate over every loaded provider and invoke @cb once for each decoder
 * implementation a provider exposes.
 *
 * @param ctx   library context passed through to the provider iteration.
 * @param cb    callback run per decoder context; must not be NULL.
 * @param args  opaque user data forwarded to @cb.
 * @return CRYPT_SUCCESS, CRYPT_NULL_INPUT on NULL @cb, or the first error
 *         reported by the provider iteration / callback.
 */
int32_t CRYPT_DECODE_ProviderProcessAll(CRYPT_EAL_LibCtx *ctx, CRYPT_DECODE_ProviderProcessCb cb, void *args)
{
    if (cb == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    CRYPT_DECODE_ProviderProcessArgs processArgs = {
        .cb = cb,
        .args = args
    };
    /* The previous success/failure split both returned the same value;
     * forward the iteration result directly. */
    return CRYPT_EAL_ProviderProcessAll(ctx, ProcessEachProviderDecoder, &processArgs);
}
/*
 * Entry point of the pool-based decode: collect all provider decoders into
 * the pool, seed the decode path with a node built from the pool's input
 * format/type and @inParam, then run the backtracking decode chain.
 *
 * @param poolCtx   decoder pool; must already carry input/target format/type.
 * @param inParam   input parameters to decode (not owned by this call).
 * @param outParam  out: set to the decoded parameters; *outParam must be
 *                  NULL on entry, otherwise CRYPT_INVALID_ARG is returned.
 */
int32_t CRYPT_DECODE_PoolDecode(CRYPT_DECODER_PoolCtx *poolCtx, const BSL_Param *inParam, BSL_Param **outParam)
{
    if (poolCtx == NULL || inParam == NULL || outParam == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (*outParam != NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_INVALID_ARG);
        return CRYPT_INVALID_ARG;
    }
    /* Populate poolCtx->decoders with every decoder the providers expose. */
    int32_t ret = CRYPT_DECODE_ProviderProcessAll(poolCtx->libCtx, CollectDecoder, poolCtx);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (BSL_LIST_COUNT(poolCtx->decoders) == 0) {
        BSL_ERR_PUSH_ERROR(CRYPT_DECODE_ERR_NO_DECODER);
        return CRYPT_DECODE_ERR_NO_DECODER;
    }
    CRYPT_DECODER_Node *initialNode = CreateDecoderNode(poolCtx->inputFormat, poolCtx->inputType,
        poolCtx->targetFormat, poolCtx->targetType, inParam);
    if (initialNode == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    ret = BSL_LIST_AddElement(poolCtx->decoderPath, initialNode, BSL_LIST_POS_END);
    if (ret != CRYPT_SUCCESS) {
        /* NOTE(review): the path elsewhere frees nodes with FreeDecoderNode
         * (see ResetLastNode); if CreateDecoderNode allocates node internals,
         * a bare BSL_SAL_Free here would leak them — verify and align. */
        BSL_SAL_Free(initialNode);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    return DecodeWithKeyChain(poolCtx, outParam);
}
#endif /* HITLS_CRYPTO_CODECS && HITLS_CRYPTO_PROVIDER */
| 2302_82127028/openHiTLS-examples_1508 | codecs/src/decode_chain.c | C | unknown | 16,054 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef HITLS_BUILD_H
#define HITLS_BUILD_H
#ifdef HITLS_TLS
#include "hitls_config_layer_tls.h"
#endif
#ifdef HITLS_PKI
#include "hitls_config_layer_pki.h"
#endif
#ifdef HITLS_CRYPTO
#include "hitls_config_layer_crypto.h"
#endif
#include "hitls_config_layer_bsl.h"
#ifndef HITLS_NO_CONFIG_CHECK
#include "hitls_config_check.h"
#endif
#if defined(HITLS_CRYPTO_PROVIDER) && defined(HITLS_CONFIG_FILE)
#include HITLS_CONFIG_FILE
#endif
#endif /* HITLS_BUILD_H */
| 2302_82127028/openHiTLS-examples_1508 | config/macro_config/hitls_build.h | C | unknown | 1,012 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/* Check the dependency of the configuration features. The check rules are as follows:
* Non-deterministic feature dependency needs to be checked.
* For example, feature a depends on feature b or c:
* if feature a is defined, at least one of feature b and c must be defined.
*/
#ifndef HITLS_CONFIG_CHECK_H
#define HITLS_CONFIG_CHECK_H
#ifdef HITLS_TLS
#if defined(HITLS_TLS_FEATURE_PROVIDER) && !defined(HITLS_CRYPTO_PROVIDER)
#error "[HiTLS] The tls-provider must work with crypto-provider"
#endif
#if (defined(HITLS_TLS_FEATURE_PHA) || defined(HITLS_TLS_FEATURE_KEY_UPDATE)) && !defined(HITLS_TLS_PROTO_TLS13)
#error "[HiTLS] Integrity check must work with TLS13"
#endif
#if defined(HITLS_TLS_SUITE_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_AES_128_GCM_SHA256 must work with sha256, gcm, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_AES_256_GCM_SHA384 must work with sha384, gcm, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CHACHA20POLY1305) || !defined(HITLS_CRYPTO_CHACHA20)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256 must work with sha256, chacha20poly1305, \
chacha20"
#endif
#endif
#if defined(HITLS_TLS_SUITE_AES_128_CCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_AES_128_CCM_SHA256 must work with sha256, ccm, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_AES_128_CCM_8_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_AES_128_CCM_8_SHA256 must work with sha256, ccm, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA256 must work with sha256, cbc, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH) || !defined(HITLS_CRYPTO_ECDSA)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, ecdh, ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH) || !defined(HITLS_CRYPTO_ECDSA)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, ecdh, ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH) || !defined(HITLS_CRYPTO_ECDSA)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, ecdh, \
ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH) || !defined(HITLS_CRYPTO_ECDSA)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 must work with sha384, cbc, aes, ecdh, \
ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH) || !defined(HITLS_CRYPTO_ECDSA)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, ecdh, \
ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH) || !defined(HITLS_CRYPTO_ECDSA)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, ecdh, \
ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, rsa, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, rsa, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_ECDH)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, rsa, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_ECDH)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA384 must work with sha384, cbc, aes, rsa, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_ECDH)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, rsa, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_ECDH)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, rsa, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CHACHA20POLY1305) || !defined(HITLS_CRYPTO_CHACHA20) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_ECDH)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 must work with sha256, \
chacha20poly1305, chacha20, rsa, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CHACHA20POLY1305) || !defined(HITLS_CRYPTO_CHACHA20) || \
!defined(HITLS_CRYPTO_ECDH) || !defined(HITLS_CRYPTO_ECDSA)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 must work with sha256, \
chacha20poly1305, chacha20, ecdh, ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CHACHA20POLY1305) || !defined(HITLS_CRYPTO_CHACHA20) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_DH)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 must work with sha256, \
chacha20poly1305, chacha20, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, dsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, dsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, dsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, dsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, dsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA256 must work with sha256, cbc, aes, dsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA256 must work with sha256, cbc, aes, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_PSK_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_PSK_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_256_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_PSK_WITH_AES_256_CCM must work with ccm, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CCM must work with ccm, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CCM must work with ccm, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA384 must work with sha384, cbc, aes"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA384 must work with sha384, cbc, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA384 must work with sha384, cbc, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA384 must work with sha384, cbc, aes, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_CHACHA20_POLY1305_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CHACHA20POLY1305) || !defined(HITLS_CRYPTO_CHACHA20)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_PSK_WITH_CHACHA20_POLY1305_SHA256 must work with sha256, chacha20poly1305, \
chacha20"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CHACHA20POLY1305) || !defined(HITLS_CRYPTO_CHACHA20) || \
!defined(HITLS_CRYPTO_ECDH)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 must work with sha256, \
chacha20poly1305, chacha20, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CHACHA20POLY1305) || !defined(HITLS_CRYPTO_CHACHA20) || \
!defined(HITLS_CRYPTO_DH)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 must work with sha256, \
chacha20poly1305, chacha20, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CHACHA20POLY1305) || !defined(HITLS_CRYPTO_CHACHA20) || \
!defined(HITLS_CRYPTO_RSA)
#error \
"[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 must work with sha256, \
chacha20poly1305, chacha20, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CCM_SHA256 must work with sha256, ccm, aes, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA256 must work with sha256, cbc, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA256 must work with sha256, cbc, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_GCM_SHA256)
#if !defined(HITLS_CRYPTO_SHA256) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_GCM_SHA256 must work with sha256, gcm, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_GCM_SHA384)
#if !defined(HITLS_CRYPTO_SHA384) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_GCM_SHA384 must work with sha384, gcm, aes, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_128_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH) || !defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_128_CBC_SHA must work with sha1, cbc, aes, dh, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_256_CBC_SHA)
#if !defined(HITLS_CRYPTO_SHA1) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_AES) || \
!defined(HITLS_CRYPTO_DH) || !defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_256_CBC_SHA must work with sha1, cbc, aes, dh, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_ECDH) || \
!defined(HITLS_CRYPTO_ECDSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CCM must work with ccm, aes, ecdh, ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_ECDH) || \
!defined(HITLS_CRYPTO_ECDSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CCM must work with ccm, aes, ecdh, ecdsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_RSA) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CCM must work with ccm, aes, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_RSA) || \
!defined(HITLS_CRYPTO_DH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CCM must work with ccm, aes, rsa, dh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM must work with ccm, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM_8)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM_8 must work with ccm, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM must work with ccm, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM_8)
#if !defined(HITLS_CRYPTO_CCM) || !defined(HITLS_CRYPTO_AES) || !defined(HITLS_CRYPTO_RSA)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM_8 must work with ccm, aes, rsa"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_SM4_CBC_SM3)
#if !defined(HITLS_CRYPTO_SM3) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_SM4) || \
!defined(HITLS_CRYPTO_SM2) || !defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_SM4_CBC_SM3 must work with sm3, cbc, sm4, sm2, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECC_SM4_CBC_SM3)
#if !defined(HITLS_CRYPTO_SM3) || !defined(HITLS_CRYPTO_CBC) || !defined(HITLS_CRYPTO_SM4) || \
!defined(HITLS_CRYPTO_SM2)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECC_SM4_CBC_SM3 must work with sm3, cbc, sm4, sm2"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_SM4_GCM_SM3)
#if !defined(HITLS_CRYPTO_SM3) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_SM4) || \
!defined(HITLS_CRYPTO_SM2) || !defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECDHE_SM4_GCM_SM3 must work with sm3, gcm, sm4, sm2, ecdh"
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECC_SM4_GCM_SM3)
#if !defined(HITLS_CRYPTO_SM3) || !defined(HITLS_CRYPTO_GCM) || !defined(HITLS_CRYPTO_SM4) || \
!defined(HITLS_CRYPTO_SM2)
#error "[HiTLS] cipher suite HITLS_TLS_SUITE_ECC_SM4_GCM_SM3 must work with sm3, gcm, sm4, sm2"
#endif
#endif
#if defined(HITLS_TLS_SUITE_AES_128_GCM_SHA256) || defined(HITLS_TLS_SUITE_AES_256_GCM_SHA384) || \
defined(HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256) || defined(HITLS_TLS_SUITE_AES_128_CCM_SHA256) || \
defined(HITLS_TLS_SUITE_AES_128_CCM_8_SHA256)
#if (!defined(HITLS_TLS_SUITE_AUTH_RSA) && !defined(HITLS_TLS_SUITE_AUTH_ECDSA) && \
!defined(HITLS_TLS_SUITE_AUTH_PSK))
#error "[HiTLS] tls13 ciphersuite must work with suite_auth_rsa or suite_auth_ecdsa or suite_auth_psk"
#endif
#endif
#endif /* HITLS_TLS */
#ifdef HITLS_CRYPTO
#if defined(HITLS_CRYPTO_HMAC) && !defined(HITLS_CRYPTO_MD)
#error "[HiTLS] The hmac must work with hash"
#endif
#if defined(HITLS_CRYPTO_DRBG_HASH) && !defined(HITLS_CRYPTO_MD)
#error "[HiTLS] The drbg_hash must work with hash"
#endif
#if defined(HITLS_CRYPTO_DRBG_CTR) && !defined(HITLS_CRYPTO_AES) && !defined(HITLS_CRYPTO_SM4)
#error "[HiTLS] AES or SM4 must be enabled for DRBG-CTR"
#endif
/* Entropy collection is only meaningful when at least one DRBG is enabled to consume it. */
#if defined(HITLS_CRYPTO_ENTROPY) && !defined(HITLS_CRYPTO_DRBG)
#error "[HiTLS] The entropy must work with at least one drbg algorithm."
#endif
/* The GM DRBG is layered on top of either the hash-based or the CTR-based DRBG core. */
#if defined(HITLS_CRYPTO_DRBG_GM) && !defined(HITLS_CRYPTO_DRBG_CTR) && !defined(HITLS_CRYPTO_DRBG_HASH)
#error "[HiTLS] DRBG-HASH or DRBG-CTR must be enabled for DRBG-GM"
#endif
#if defined(HITLS_CRYPTO_ENTROPY_HARDWARE) && !defined(HITLS_CRYPTO_EALINIT)
#error "[HiTLS] ealinit must be enabled when the hardware entropy source is enabled."
#endif
#if defined(HITLS_CRYPTO_ENTROPY) && defined(HITLS_CRYPTO_DRBG_CTR) && !defined(HITLS_CRYPTO_DRBG_GM)
#if !defined(HITLS_CRYPTO_CMAC_AES)
#error "[HiTLS] Configure the conditioning function. Currently, CRYPT_MAC_CMAC_AES is supported. \
others may be supported in the future."
#endif
#endif
#if defined(HITLS_CRYPTO_BN) && !(defined(HITLS_THIRTY_TWO_BITS) || defined(HITLS_SIXTY_FOUR_BITS))
#error "[HiTLS] To use bn, the number of system bits must be specified first."
#endif
#if defined(HITLS_CRYPTO_HPKE)
#if !defined(HITLS_CRYPTO_AES) && !defined(HITLS_CRYPTO_CHACHA20POLY1305)
#error "[HiTLS] The hpke must work with aes or chacha20poly1305."
#endif
#if !defined(HITLS_CRYPTO_CHACHA20POLY1305) && defined(HITLS_CRYPTO_AES) && !defined(HITLS_CRYPTO_GCM)
#error "[HiTLS] The hpke must work with aes-gcm."
#endif
#if !defined(HITLS_CRYPTO_CURVE_NISTP256) && !defined(HITLS_CRYPTO_CURVE_NISTP384) && \
!defined(HITLS_CRYPTO_CURVE_NISTP521) && !defined(HITLS_CRYPTO_X25519)
#error "[HiTLS] The hpke must work with p256 or p384 or p521 or x25519."
#endif
#endif /* HITLS_CRYPTO_HPKE */
#if defined(HITLS_CRYPTO_RSA_BLINDING) && !(defined(HITLS_CRYPTO_BN_RAND))
#error "[HiTLS] The blind must work with bn_rand"
#endif
#if defined(HITLS_CRYPTO_RSA_SIGN) || defined(HITLS_CRYPTO_RSA_VERIFY)
#if !defined(HITLS_CRYPTO_RSA_EMSA_PSS) && !defined(HITLS_CRYPTO_RSA_EMSA_PKCSV15)
#error "[HiTLS] The rsa_sign/rsa_verify must work with rsa_emsa_pss/rsa_emsa_pkcsv15"
#endif
#endif
#if defined(HITLS_CRYPTO_RSA_ENCRYPT) || defined(HITLS_CRYPTO_RSA_DECRYPT)
#if !defined(HITLS_CRYPTO_RSAES_OAEP) && !defined(HITLS_CRYPTO_RSAES_PKCSV15) && \
!defined(HITLS_CRYPTO_RSAES_PKCSV15_TLS) && !defined(HITLS_CRYPTO_RSA_NO_PAD)
#error "[HiTLS] The rsa_encrypt/rsa_decrypt must work with rsaes_oaep/rsaes_pkcsv15/rsaes_pkcsv15_tls/rsa_no_pad"
#endif
#endif
#if defined(HITLS_CRYPTO_RSA_NO_PAD) || defined(HITLS_CRYPTO_RSAES_OAEP) || defined(HITLS_CRYPTO_RSAES_PKCSV15) || \
defined(HITLS_CRYPTO_RSAES_PKCSV15_TLS)
#if !defined(HITLS_CRYPTO_RSA_ENCRYPT) && !defined(HITLS_CRYPTO_RSA_DECRYPT)
#error "[HiTLS] The rsaes_oaep/rsaes_pkcsv15/rsaes_pkcsv15_tls/rsa_no_pad must work with rsa_encrypt/rsa_decrypt"
#endif
#endif
#if defined(HITLS_CRYPTO_RSA_EMSA_PSS) || defined(HITLS_CRYPTO_RSA_EMSA_PKCSV15)
#if !defined(HITLS_CRYPTO_RSA_SIGN) && !defined(HITLS_CRYPTO_RSA_VERIFY)
#error "[HiTLS] The rsa_emsa_pss/rsa_emsa_pkcsv15 must work with rsa_sign/rsa_verify"
#endif
#endif
#if defined(HITLS_CRYPTO_RSA_BLINDING) && !defined(HITLS_CRYPTO_RSA_SIGN) && !defined(HITLS_CRYPTO_RSA_DECRYPT)
#error "[HiTLS] The rsa_blinding must work with rsa_sign or rsa_decrypt"
#endif
#if defined(HITLS_CRYPTO_RSA_ENCRYPT) && (defined(HITLS_CRYPTO_RSAES_OAEP) || defined(HITLS_CRYPTO_RSAES_PKCSV15))
#ifndef HITLS_CRYPTO_DRBG
#error "[HiTLS] The rsa_encrypt+rsaes_oaep/rsa_pkcsv15 must work with a drbg algorithm."
#endif
#endif
#if defined(HITLS_CRYPTO_RSA_SIGN) && defined(HITLS_CRYPTO_RSA_EMSA_PSS) && !defined(HITLS_CRYPTO_DRBG)
#error "[HiTLS] The rsa_sign+rsa_emsa_pss must work with a drbg algorithm."
#endif
#if defined(HITLS_CRYPTO_RSA_GEN) && !(defined(HITLS_CRYPTO_BN_RAND) && defined(HITLS_CRYPTO_BN_PRIME))
#error "[HiTLS] The rsa_gen must work with bn_rand and bn_prime"
#endif
#if defined(HITLS_CRYPTO_ECDSA)
#if !defined(HITLS_CRYPTO_CURVE_NISTP224) && !defined(HITLS_CRYPTO_CURVE_NISTP256) && \
!defined(HITLS_CRYPTO_CURVE_NISTP384) && !defined(HITLS_CRYPTO_CURVE_NISTP521) && \
!defined(HITLS_CRYPTO_CURVE_BP256R1) && !defined(HITLS_CRYPTO_CURVE_BP384R1) && \
!defined(HITLS_CRYPTO_CURVE_BP512R1) && !defined(HITLS_CRYPTO_CURVE_192WAPI)
#error "[HiTLS] Nist curves or brainpool curves or 192Wapi curve must be enabled for ECDSA."
#endif
#endif
/* ECDH needs at least one underlying curve. The message now lists 192Wapi too,
 * matching both the condition below and the parallel ECDSA check above. */
#if defined(HITLS_CRYPTO_ECDH)
#if !defined(HITLS_CRYPTO_CURVE_NISTP224) && !defined(HITLS_CRYPTO_CURVE_NISTP256) && \
    !defined(HITLS_CRYPTO_CURVE_NISTP384) && !defined(HITLS_CRYPTO_CURVE_NISTP521) && \
    !defined(HITLS_CRYPTO_CURVE_BP256R1) && !defined(HITLS_CRYPTO_CURVE_BP384R1) && \
    !defined(HITLS_CRYPTO_CURVE_BP512R1) && !defined(HITLS_CRYPTO_CURVE_192WAPI)
#error "[HiTLS] Nist curves or brainpool curves or 192Wapi curve must be enabled for ECDH."
#endif
#endif
#if defined(HITLS_CRYPTO_CMVP_INTEGRITY) && !defined(HITLS_CRYPTO_CMVP)
#error "[HiTLS] Integrity check must work with CMVP"
#endif
#if (defined(HITLS_CRYPTO_SHA1_ARMV8) || \
defined(HITLS_CRYPTO_SHA256_ARMV8) || defined(HITLS_CRYPTO_SHA224_ARMV8) || defined(HITLS_CRYPTO_SHA2_ARMV8) || \
defined(HITLS_CRYPTO_SM4_X8664)) && !defined(HITLS_CRYPTO_EALINIT)
#error "[HiTLS] ealinit must be enabled for sha1_armv8 or sha256_armv8 or sha224_armv8 or sm4_x8664."
#endif
#if defined(HITLS_CRYPTO_HYBRIDKEM)
#if !defined(HITLS_CRYPTO_X25519) && !defined(HITLS_CRYPTO_ECDH)
#error "[HiTLS] The hybrid must work with x25519 or ecdh."
#endif
#endif
#if defined(HITLS_CRYPTO_HMAC) && !defined(HITLS_CRYPTO_MD)
#error "[HiTLS] The hmac must work with hash."
#endif
#if defined(HITLS_CRYPTO_DRBG_HASH) && !defined(HITLS_CRYPTO_MD)
#error "[HiTLS] The drbg_hash must work with hash."
#endif
/* NOTE(review): this duplicates the entropy/DRBG check earlier in this header;
 * harmless, kept for safety, but a candidate for removal. */
#if defined(HITLS_CRYPTO_ENTROPY) && !defined(HITLS_CRYPTO_DRBG)
#error "[HiTLS] The entropy must work with at least one drbg algorithm."
#endif
#if defined(HITLS_CRYPTO_PKEY) && !defined(HITLS_CRYPTO_MD)
#error "[HiTLS] The pkey must work with hash."
#endif
#if defined(HITLS_CRYPTO_BN) && !(defined(HITLS_THIRTY_TWO_BITS) || defined(HITLS_SIXTY_FOUR_BITS))
#error "[HiTLS] To use bn, the number of system bits must be specified first."
#endif
#ifdef HITLS_CRYPTO_KEY_EPKI
#if !defined(HITLS_CRYPTO_KEY_ENCODE) && !defined(HITLS_CRYPTO_KEY_DECODE)
#error "[HiTLS] The key encrypt must work with key gen or key parse."
#endif
#if !defined(HITLS_CRYPTO_DRBG)
#error "[HiTLS] The key encrypt must work with a drbg algorithm."
#endif
#if !defined(HITLS_CRYPTO_CIPHER)
#error "[HiTLS] The key encrypt must work with a symmetric algorithm."
#endif
#endif
/* Key encode/decode requires at least one supported asymmetric algorithm.
 * RSA_VERIFY is moved inside the parenthesized group: the result is identical
 * (&& is associative) but the grouping now matches the six alternatives the
 * error message lists. */
#if defined(HITLS_CRYPTO_CODECSKEY) && (!defined(HITLS_CRYPTO_ECDSA) && !defined(HITLS_CRYPTO_SM2_SIGN) && \
    !defined(HITLS_CRYPTO_SM2_CRYPT) && !defined(HITLS_CRYPTO_ED25519) && !defined(HITLS_CRYPTO_RSA_SIGN) && \
    !defined(HITLS_CRYPTO_RSA_VERIFY))
#error "[HiTLS] The encode must work with ecdsa or sm2_sign or sm2_crypt or ed25519 or rsa_sign or rsa_verify."
#endif
#endif /* HITLS_CRYPTO */
#ifdef HITLS_PKI
#if defined(HITLS_PKI_INFO) && !defined(HITLS_PKI_X509_CRT)
#error "[HiTLS] The info must work with x509_crt_gen or x509_crt_parse."
#endif
#endif /* HITLS_PKI */
#if defined(HITLS_TLS_FEATURE_ETM) && !defined(HITLS_TLS_SUITE_CIPHER_CBC)
#error "[HiTLS] The etm must work with cbc"
#endif
#endif /* HITLS_CONFIG_CHECK_H */
/* source: config/macro_config/hitls_config_check.h (dataset metadata row removed; kept as comment) */
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/* Derivation of configuration features.
* The derivation type (rule) and sequence are as follows:
* 1. Parent features derive child features.
* 2. Derive the features of dependencies.
* For example, if feature a depends on features b and c, you need to derive features b and c.
* 3. Child features derive parent features.
* The high-level interfaces of the crypto module is controlled by the parent feature macro,
* if there is no parent feature, such interfaces will be unavailable.
*/
#ifndef HITLS_CONFIG_LAYER_BSL_H
#define HITLS_CONFIG_LAYER_BSL_H
/* BSL_INIT */
/* The crypto EAL layer performs library initialization through BSL, so enabling
 * HITLS_CRYPTO_EAL pulls in HITLS_BSL_INIT automatically. */
#if defined(HITLS_CRYPTO_EAL) && !defined(HITLS_BSL_INIT)
#define HITLS_BSL_INIT
#endif
/* Initialization reports failures through the BSL error module. */
#if defined(HITLS_BSL_INIT) && !defined(HITLS_BSL_ERR)
#define HITLS_BSL_ERR
#endif
/* The UI module reads from files (e.g. terminals), so it needs SAL file support. */
#if defined(HITLS_BSL_UI) && !defined(HITLS_BSL_SAL_FILE)
#define HITLS_BSL_SAL_FILE
#endif
/* The config parser stores sections in a BSL list and loads config via file UIO. */
#ifdef HITLS_BSL_CONF
#ifndef HITLS_BSL_LIST
#define HITLS_BSL_LIST
#endif
#ifndef HITLS_BSL_UIO_FILE
#define HITLS_BSL_UIO_FILE
#endif
#endif
/* BSL_UIO */
/* Derive the child-features of uio. */
#ifdef HITLS_BSL_UIO
#ifndef HITLS_BSL_UIO_PLT
#define HITLS_BSL_UIO_PLT
#endif
#ifndef HITLS_BSL_UIO_BUFFER
#define HITLS_BSL_UIO_BUFFER
#endif
#ifndef HITLS_BSL_UIO_SCTP
#define HITLS_BSL_UIO_SCTP
#endif
#ifndef HITLS_BSL_UIO_UDP
#define HITLS_BSL_UIO_UDP
#endif
#ifndef HITLS_BSL_UIO_TCP
#define HITLS_BSL_UIO_TCP
#endif
#ifndef HITLS_BSL_UIO_MEM
#define HITLS_BSL_UIO_MEM
#endif
#ifndef HITLS_BSL_UIO_FILE
#define HITLS_BSL_UIO_FILE
#endif
#endif
#if defined(HITLS_BSL_UIO_FILE) && !defined(HITLS_BSL_SAL_FILE)
#define HITLS_BSL_SAL_FILE
#endif
/* Derive the child-features of uio mem. */
#if defined(HITLS_BSL_UIO_MEM)
#ifndef HITLS_BSL_SAL_MEM
#define HITLS_BSL_SAL_MEM
#endif
#ifndef HITLS_BSL_BUFFER
#define HITLS_BSL_BUFFER
#endif
#endif
/* Derive the dependency features of uio_tcp and uio_sctp. */
#if defined(HITLS_BSL_UIO_TCP) || defined(HITLS_BSL_UIO_SCTP)
#ifndef HITLS_BSL_SAL_NET
#define HITLS_BSL_SAL_NET
#endif
#endif
/* Derive parent feature from child features. */
#if defined(HITLS_BSL_UIO_BUFFER) || defined(HITLS_BSL_UIO_SCTP) || defined(HITLS_BSL_UIO_TCP) || \
defined(HITLS_BSL_UIO_MEM) || defined(HITLS_BSL_UIO_FILE)
#ifndef HITLS_BSL_UIO_PLT
#define HITLS_BSL_UIO_PLT
#endif
#endif
#ifdef HITLS_BSL_PEM
#ifndef HITLS_BSL_BASE64
#define HITLS_BSL_BASE64
#endif
#endif
#ifdef HITLS_BSL_ASN1
#ifndef HITLS_BSL_SAL_TIME
#define HITLS_BSL_SAL_TIME
#endif
#endif
#endif /* HITLS_CONFIG_LAYER_BSL_H */
/* source: config/macro_config/hitls_config_layer_bsl.h (dataset metadata row removed; kept as comment) */
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/* Derivation of configuration features.
* The derivation type (rule) and sequence are as follows:
* 1. Parent features derive child features.
* 2. Derive the features of dependencies.
* For example, if feature a depends on features b and c, you need to derive features b and c.
* 3. Child features derive parent features.
* The high-level interfaces of the crypto module is controlled by the parent feature macro,
* if there is no parent feature, such interfaces will be unavailable.
*/
#ifndef HITLS_CONFIG_LAYER_CRYPTO_H
#define HITLS_CONFIG_LAYER_CRYPTO_H
#ifdef HITLS_CRYPTO_CODECS
#ifndef HITLS_CRYPTO_PROVIDER
#define HITLS_CRYPTO_PROVIDER
#endif
#endif
#if defined(HITLS_CRYPTO_CODECSKEY) && defined(HITLS_CRYPTO_PROVIDER)
#ifndef HITLS_CRYPTO_CODECS
#define HITLS_CRYPTO_CODECS
#endif
#endif
#ifdef HITLS_CRYPTO_CODECSKEY
#ifndef HITLS_CRYPTO_KEY_ENCODE
#define HITLS_CRYPTO_KEY_ENCODE
#endif
#ifndef HITLS_CRYPTO_KEY_DECODE
#define HITLS_CRYPTO_KEY_DECODE
#endif
#ifndef HITLS_CRYPTO_KEY_EPKI
#define HITLS_CRYPTO_KEY_EPKI
#endif
#ifndef HITLS_CRYPTO_KEY_INFO
#define HITLS_CRYPTO_KEY_INFO
#endif
#endif
#ifdef HITLS_CRYPTO_KEY_EPKI
#ifndef HITLS_CRYPTO_PBKDF2
#define HITLS_CRYPTO_PBKDF2
#endif
#endif
#ifdef HITLS_CRYPTO_KEY_INFO
#ifndef HITLS_BSL_PRINT
#define HITLS_BSL_PRINT
#endif
#endif
#if defined(HITLS_CRYPTO_KEY_ENCODE) || defined(HITLS_CRYPTO_KEY_DECODE) || defined(HITLS_CRYPTO_KEY_EPKI) || \
defined(HITLS_CRYPTO_KEY_INFO)
#ifndef HITLS_CRYPTO_CODECSKEY
#define HITLS_CRYPTO_CODECSKEY
#endif
#ifndef HITLS_BSL_ASN1
#define HITLS_BSL_ASN1
#endif
#ifndef HITLS_BSL_OBJ
#define HITLS_BSL_OBJ
#endif
#endif
#ifdef HITLS_CRYPTO_PROVIDER
#ifndef HITLS_BSL_PARAMS
#define HITLS_BSL_PARAMS
#endif
#endif
/* kdf */
#ifdef HITLS_CRYPTO_KDF
#ifndef HITLS_CRYPTO_PBKDF2
#define HITLS_CRYPTO_PBKDF2
#endif
#ifndef HITLS_CRYPTO_HKDF
#define HITLS_CRYPTO_HKDF
#endif
#ifndef HITLS_CRYPTO_KDFTLS12
#define HITLS_CRYPTO_KDFTLS12
#endif
#ifndef HITLS_CRYPTO_SCRYPT
#define HITLS_CRYPTO_SCRYPT
#endif
#endif
#ifdef HITLS_CRYPTO_HPKE
#ifndef HITLS_CRYPTO_HKDF
#define HITLS_CRYPTO_HKDF
#endif
#ifndef HITLS_BSL_PARAMS
#define HITLS_BSL_PARAMS
#endif
#endif
#ifdef HITLS_CRYPTO_SCRYPT
#ifndef HITLS_CRYPTO_SHA256
#define HITLS_CRYPTO_SHA256
#endif
#ifndef HITLS_CRYPTO_PBKDF2
#define HITLS_CRYPTO_PBKDF2
#endif
#endif
#if defined(HITLS_CRYPTO_PBKDF2) || defined(HITLS_CRYPTO_HKDF) || defined(HITLS_CRYPTO_KDFTLS12) || \
defined(HITLS_CRYPTO_SCRYPT)
#ifndef HITLS_CRYPTO_KDF
#define HITLS_CRYPTO_KDF
#endif
#ifndef HITLS_CRYPTO_HMAC
#define HITLS_CRYPTO_HMAC
#endif
#ifndef HITLS_BSL_PARAMS
#define HITLS_BSL_PARAMS
#endif
#endif
/* DRBG */
/* The entropy pool is maintained as a BSL list. */
#if defined(HITLS_CRYPTO_ENTROPY) && !defined(HITLS_BSL_LIST)
#define HITLS_BSL_LIST
#endif
/* If entropy is enabled but no concrete entropy source was selected, fall back
 * to the DEVRANDOM source as the default (presumably /dev/random-based — confirm
 * against the entropy implementation). */
#if defined(HITLS_CRYPTO_ENTROPY) && !defined(HITLS_CRYPTO_ENTROPY_GETENTROPY) && \
    !defined(HITLS_CRYPTO_ENTROPY_DEVRANDOM) && !defined(HITLS_CRYPTO_ENTROPY_SYS) && \
    !defined(HITLS_CRYPTO_ENTROPY_HARDWARE)
#define HITLS_CRYPTO_ENTROPY_DEVRANDOM
#endif
#ifdef HITLS_CRYPTO_DRBG
#ifndef HITLS_CRYPTO_DRBG_HASH
#define HITLS_CRYPTO_DRBG_HASH
#endif
#ifndef HITLS_CRYPTO_DRBG_HMAC
#define HITLS_CRYPTO_DRBG_HMAC
#endif
#ifndef HITLS_CRYPTO_DRBG_CTR
#define HITLS_CRYPTO_DRBG_CTR
#endif
#endif
#if defined(HITLS_CRYPTO_DRBG_HMAC) && !defined(HITLS_CRYPTO_HMAC)
#define HITLS_CRYPTO_HMAC
#endif
#if defined(HITLS_CRYPTO_DRBG_HASH) || defined(HITLS_CRYPTO_DRBG_HMAC) || defined(HITLS_CRYPTO_DRBG_CTR)
#ifndef HITLS_CRYPTO_DRBG
#define HITLS_CRYPTO_DRBG
#endif
#ifndef HITLS_BSL_PARAMS
#define HITLS_BSL_PARAMS
#endif
#endif
#if defined(HITLS_CRYPTO_DRBG_GM)
#ifndef HITLS_BSL_SAL_TIME
#define HITLS_BSL_SAL_TIME
#endif
#endif
/* MAC */
#ifdef HITLS_CRYPTO_MAC
#ifndef HITLS_CRYPTO_HMAC
#define HITLS_CRYPTO_HMAC
#endif
#ifndef HITLS_CRYPTO_CMAC
#define HITLS_CRYPTO_CMAC
#endif
#ifndef HITLS_CRYPTO_GMAC
#define HITLS_CRYPTO_GMAC
#endif
#ifndef HITLS_CRYPTO_SIPHASH
#define HITLS_CRYPTO_SIPHASH
#endif
#ifndef HITLS_CRYPTO_CBC_MAC
#define HITLS_CRYPTO_CBC_MAC
#endif
#endif
#if defined(HITLS_CRYPTO_CBC_MAC) && !defined(HITLS_CRYPTO_SM4)
#define HITLS_CRYPTO_SM4
#endif
#ifdef HITLS_CRYPTO_GMAC
#ifndef HITLS_CRYPTO_EAL
#define HITLS_CRYPTO_EAL
#endif
#ifndef HITLS_CRYPTO_AES
#define HITLS_CRYPTO_AES
#endif
#ifndef HITLS_CRYPTO_GCM
#define HITLS_CRYPTO_GCM
#endif
#endif
#ifdef HITLS_CRYPTO_CMAC
#ifndef HITLS_CRYPTO_CMAC_AES
#define HITLS_CRYPTO_CMAC_AES
#endif
#ifndef HITLS_CRYPTO_CMAC_SM4
#define HITLS_CRYPTO_CMAC_SM4
#endif
#endif
#if defined(HITLS_CRYPTO_CMAC_AES) && !defined(HITLS_CRYPTO_AES)
#define HITLS_CRYPTO_AES
#endif
#if defined(HITLS_CRYPTO_CMAC_SM4) && !defined(HITLS_CRYPTO_SM4)
#define HITLS_CRYPTO_SM4
#endif
#if defined(HITLS_CRYPTO_CMAC_AES) || defined(HITLS_CRYPTO_CMAC_SM4)
#ifndef HITLS_CRYPTO_CMAC
#define HITLS_CRYPTO_CMAC
#endif
#endif
#if defined(HITLS_CRYPTO_HMAC) || defined(HITLS_CRYPTO_CMAC) || defined(HITLS_CRYPTO_GMAC) || \
defined(HITLS_CRYPTO_SIPHASH) || defined(HITLS_CRYPTO_CBC_MAC)
#ifndef HITLS_CRYPTO_MAC
#define HITLS_CRYPTO_MAC
#endif
#endif
/* CIPHER */
#ifdef HITLS_CRYPTO_CIPHER
#ifndef HITLS_CRYPTO_AES
#define HITLS_CRYPTO_AES
#endif
#ifndef HITLS_CRYPTO_SM4
#define HITLS_CRYPTO_SM4
#endif
#ifndef HITLS_CRYPTO_CHACHA20
#define HITLS_CRYPTO_CHACHA20
#endif
#endif
#if defined(HITLS_CRYPTO_CHACHA20) && !defined(HITLS_CRYPTO_CHACHA20POLY1305)
#define HITLS_CRYPTO_CHACHA20POLY1305
#endif
#if defined(HITLS_CRYPTO_AES) || defined(HITLS_CRYPTO_SM4) || defined(HITLS_CRYPTO_CHACHA20)
#ifndef HITLS_CRYPTO_CIPHER
#define HITLS_CRYPTO_CIPHER
#endif
#endif
/* MODES */
#ifdef HITLS_CRYPTO_MODES
#ifndef HITLS_CRYPTO_CTR
#define HITLS_CRYPTO_CTR
#endif
#ifndef HITLS_CRYPTO_CBC
#define HITLS_CRYPTO_CBC
#endif
#ifndef HITLS_CRYPTO_ECB
#define HITLS_CRYPTO_ECB
#endif
#ifndef HITLS_CRYPTO_GCM
#define HITLS_CRYPTO_GCM
#endif
#ifndef HITLS_CRYPTO_CCM
#define HITLS_CRYPTO_CCM
#endif
#ifndef HITLS_CRYPTO_XTS
#define HITLS_CRYPTO_XTS
#endif
#ifndef HITLS_CRYPTO_CFB
#define HITLS_CRYPTO_CFB
#endif
#ifndef HITLS_CRYPTO_OFB
#define HITLS_CRYPTO_OFB
#endif
#ifndef HITLS_CRYPTO_CHACHA20POLY1305
#define HITLS_CRYPTO_CHACHA20POLY1305
#endif
#endif
#if defined(HITLS_CRYPTO_CTR) || defined(HITLS_CRYPTO_CBC) || defined(HITLS_CRYPTO_ECB) || \
defined(HITLS_CRYPTO_GCM) || defined(HITLS_CRYPTO_CCM) || defined(HITLS_CRYPTO_XTS) || \
defined(HITLS_CRYPTO_CFB) || defined(HITLS_CRYPTO_OFB) || defined(HITLS_CRYPTO_CHACHA20POLY1305)
#ifndef HITLS_CRYPTO_MODES
#define HITLS_CRYPTO_MODES
#endif
#endif
/* PKEY */
#ifdef HITLS_CRYPTO_PKEY
#ifndef HITLS_CRYPTO_ECC
#define HITLS_CRYPTO_ECC
#endif
#ifndef HITLS_CRYPTO_DSA
#define HITLS_CRYPTO_DSA
#endif
#ifndef HITLS_CRYPTO_RSA
#define HITLS_CRYPTO_RSA
#endif
#ifndef HITLS_CRYPTO_DH
#define HITLS_CRYPTO_DH
#endif
#ifndef HITLS_CRYPTO_ECDSA
#define HITLS_CRYPTO_ECDSA
#endif
#ifndef HITLS_CRYPTO_ECDH
#define HITLS_CRYPTO_ECDH
#endif
#ifndef HITLS_CRYPTO_SM2
#define HITLS_CRYPTO_SM2
#endif
#ifndef HITLS_CRYPTO_CURVE25519
#define HITLS_CRYPTO_CURVE25519
#endif
#ifndef HITLS_CRYPTO_MLKEM
#define HITLS_CRYPTO_MLKEM
#endif
#ifndef HITLS_CRYPTO_MLDSA
#define HITLS_CRYPTO_MLDSA
#endif
#ifndef HITLS_CRYPTO_HYBRIDKEM
#define HITLS_CRYPTO_HYBRIDKEM
#endif
#ifndef HITLS_CRYPTO_PAILLIER
#define HITLS_CRYPTO_PAILLIER
#endif
#ifndef HITLS_CRYPTO_ELGAMAL
#define HITLS_CRYPTO_ELGAMAL
#endif
#ifndef HITLS_CRYPTO_SLH_DSA
#define HITLS_CRYPTO_SLH_DSA
#endif
#ifndef HITLS_CRYPTO_XMSS
#define HITLS_CRYPTO_XMSS
#endif
#endif
#ifdef HITLS_CRYPTO_RSA
#ifndef HITLS_CRYPTO_RSA_SIGN
#define HITLS_CRYPTO_RSA_SIGN
#endif
#ifndef HITLS_CRYPTO_RSA_VERIFY
#define HITLS_CRYPTO_RSA_VERIFY
#endif
#ifndef HITLS_CRYPTO_RSA_ENCRYPT
#define HITLS_CRYPTO_RSA_ENCRYPT
#endif
#ifndef HITLS_CRYPTO_RSA_DECRYPT
#define HITLS_CRYPTO_RSA_DECRYPT
#endif
#ifndef HITLS_CRYPTO_RSA_BLINDING
#define HITLS_CRYPTO_RSA_BLINDING
#endif
#ifndef HITLS_CRYPTO_RSA_GEN
#define HITLS_CRYPTO_RSA_GEN
#endif
#ifndef HITLS_CRYPTO_RSA_PAD
#define HITLS_CRYPTO_RSA_PAD
#endif
#ifndef HITLS_CRYPTO_RSA_BSSA
#define HITLS_CRYPTO_RSA_BSSA
#endif
#ifndef HITLS_CRYPTO_RSA_CHECK
#define HITLS_CRYPTO_RSA_CHECK
#endif
#endif
#ifdef HITLS_CRYPTO_RSA_BSSA
#ifndef HITLS_CRYPTO_RSA_EMSA_PSS
#define HITLS_CRYPTO_RSA_EMSA_PSS
#endif
#ifndef HITLS_CRYPTO_RSA_BLINDING
#define HITLS_CRYPTO_RSA_BLINDING
#endif
#endif
#ifdef HITLS_CRYPTO_RSA_GEN
#ifndef HITLS_CRYPTO_BN_RAND
#define HITLS_CRYPTO_BN_RAND
#endif
#ifndef HITLS_CRYPTO_BN_PRIME
#define HITLS_CRYPTO_BN_PRIME
#endif
#endif
#ifdef HITLS_CRYPTO_RSA_BLINDING
#ifndef HITLS_CRYPTO_BN_RAND
#define HITLS_CRYPTO_BN_RAND
#endif
#endif
#ifdef HITLS_CRYPTO_RSA_CHECK
#ifndef HITLS_CRYPTO_BN_RAND
#define HITLS_CRYPTO_BN_RAND
#endif
#endif
#ifdef HITLS_CRYPTO_RSA_PAD
#ifndef HITLS_CRYPTO_RSA_EMSA_PSS
#define HITLS_CRYPTO_RSA_EMSA_PSS
#endif
#ifndef HITLS_CRYPTO_RSA_EMSA_PKCSV15
#define HITLS_CRYPTO_RSA_EMSA_PKCSV15
#endif
#ifndef HITLS_CRYPTO_RSAES_OAEP
#define HITLS_CRYPTO_RSAES_OAEP
#endif
#ifndef HITLS_CRYPTO_RSAES_PKCSV15
#define HITLS_CRYPTO_RSAES_PKCSV15
#endif
#ifndef HITLS_CRYPTO_RSAES_PKCSV15_TLS
#define HITLS_CRYPTO_RSAES_PKCSV15_TLS
#endif
#ifndef HITLS_CRYPTO_RSA_NO_PAD
#define HITLS_CRYPTO_RSA_NO_PAD
#endif
#endif
#if defined(HITLS_CRYPTO_RSA_EMSA_PSS) || defined(HITLS_CRYPTO_RSA_EMSA_PKCSV15) || \
defined(HITLS_CRYPTO_RSAES_OAEP) || defined(HITLS_CRYPTO_RSAES_PKCSV15) || \
defined(HITLS_CRYPTO_RSAES_PKCSV15_TLS) || defined(HITLS_CRYPTO_RSA_NO_PAD)
#ifndef HITLS_CRYPTO_RSA_PAD
#define HITLS_CRYPTO_RSA_PAD
#endif
#endif
#if defined(HITLS_CRYPTO_RSA_SIGN) || defined(HITLS_CRYPTO_RSA_VERIFY) || \
defined(HITLS_CRYPTO_RSA_ENCRYPT) || defined(HITLS_CRYPTO_RSA_DECRYPT) || \
defined(HITLS_CRYPTO_RSA_BLINDING) || defined(HITLS_CRYPTO_RSA_PAD) || defined(HITLS_CRYPTO_RSA_GEN)
#ifndef HITLS_CRYPTO_RSA
#define HITLS_CRYPTO_RSA
#endif
// rsa common dependency
#ifndef HITLS_CRYPTO_BN_BASIC
#define HITLS_CRYPTO_BN_BASIC
#endif
#endif
#ifdef HITLS_CRYPTO_CURVE25519
#ifndef HITLS_CRYPTO_X25519
#define HITLS_CRYPTO_X25519
#endif
#ifndef HITLS_CRYPTO_ED25519
#define HITLS_CRYPTO_ED25519
#endif
#ifndef HITLS_CRYPTO_X25519_CHECK
#define HITLS_CRYPTO_X25519_CHECK
#endif
#ifndef HITLS_CRYPTO_ED25519_CHECK
#define HITLS_CRYPTO_ED25519_CHECK
#endif
#endif
#if defined(HITLS_CRYPTO_ED25519) && !defined(HITLS_CRYPTO_SHA512)
#define HITLS_CRYPTO_SHA512
#endif
#if defined(HITLS_CRYPTO_X25519) || defined(HITLS_CRYPTO_ED25519)
#ifndef HITLS_CRYPTO_CURVE25519
#define HITLS_CRYPTO_CURVE25519
#endif
#endif
#ifdef HITLS_CRYPTO_SM2
#ifndef HITLS_CRYPTO_SM2_SIGN
#define HITLS_CRYPTO_SM2_SIGN
#endif
#ifndef HITLS_CRYPTO_SM2_CRYPT
#define HITLS_CRYPTO_SM2_CRYPT
#endif
#ifndef HITLS_CRYPTO_SM2_EXCH
#define HITLS_CRYPTO_SM2_EXCH
#endif
#endif
#if defined(HITLS_CRYPTO_SM2_SIGN) || defined(HITLS_CRYPTO_SM2_CRYPT) || defined(HITLS_CRYPTO_SM2_EXCH)
#ifndef HITLS_CRYPTO_SM2
#define HITLS_CRYPTO_SM2
#endif
#endif
#ifdef HITLS_CRYPTO_SM2
#ifndef HITLS_CRYPTO_SM3
#define HITLS_CRYPTO_SM3
#endif
#ifndef HITLS_CRYPTO_CURVE_SM2
#define HITLS_CRYPTO_CURVE_SM2
#endif
#ifndef HITLS_CRYPTO_SM2_CHECK
#define HITLS_CRYPTO_SM2_CHECK
#endif
#endif
#ifdef HITLS_CRYPTO_XMSS
#ifndef HITLS_CRYPTO_SLH_DSA
#define HITLS_CRYPTO_SLH_DSA
#endif
#endif
#ifdef HITLS_CRYPTO_SLH_DSA
#ifndef HITLS_CRYPTO_SHA2
#define HITLS_CRYPTO_SHA2
#endif
#ifndef HITLS_CRYPTO_SHA3
#define HITLS_CRYPTO_SHA3
#endif
#ifndef HITLS_BSL_OBJ
#define HITLS_BSL_OBJ
#endif
#ifndef HITLS_CRYPTO_EAL
#define HITLS_CRYPTO_EAL
#endif
#ifndef HITLS_CRYPTO_HMAC
#define HITLS_CRYPTO_HMAC
#endif
#ifndef HITLS_CRYPTO_SHA256
#define HITLS_CRYPTO_SHA256
#endif
#ifndef HITLS_CRYPTO_SHA512
#define HITLS_CRYPTO_SHA512
#endif
#ifndef HITLS_CRYPTO_SLH_DSA_CHECK
#define HITLS_CRYPTO_SLH_DSA_CHECK
#endif
#endif
#if defined(HITLS_CRYPTO_MLDSA) || defined(HITLS_CRYPTO_ELGAMAL)
#ifndef HITLS_CRYPTO_BN_RAND
#define HITLS_CRYPTO_BN_RAND
#endif
#ifndef HITLS_CRYPTO_BN_PRIME
#define HITLS_CRYPTO_BN_PRIME
#endif
#endif
#ifdef HITLS_CRYPTO_HYBRIDKEM
#ifndef HITLS_CRYPTO_MLKEM
#define HITLS_CRYPTO_MLKEM
#endif
#endif
#ifdef HITLS_CRYPTO_MLKEM
#ifndef HITLS_CRYPTO_SHA3
#define HITLS_CRYPTO_SHA3
#endif
#ifndef HITLS_CRYPTO_KEM
#define HITLS_CRYPTO_KEM
#endif
#ifndef HITLS_CRYPTO_MLKEM_CHECK
#define HITLS_CRYPTO_MLKEM_CHECK
#endif
#endif
#ifdef HITLS_CRYPTO_MLDSA
#ifndef HITLS_CRYPTO_SHA3
#define HITLS_CRYPTO_SHA3
#endif
#ifndef HITLS_BSL_OBJ
#define HITLS_BSL_OBJ
#endif
#ifndef HITLS_CRYPTO_MLDSA_CHECK
#define HITLS_CRYPTO_MLDSA_CHECK
#endif
#endif
#ifdef HITLS_CRYPTO_ECC
#ifndef HITLS_CRYPTO_CURVE_NISTP224
#define HITLS_CRYPTO_CURVE_NISTP224
#endif
#ifndef HITLS_CRYPTO_CURVE_NISTP256
#define HITLS_CRYPTO_CURVE_NISTP256
#endif
#ifndef HITLS_CRYPTO_CURVE_NISTP384
#define HITLS_CRYPTO_CURVE_NISTP384
#endif
#ifndef HITLS_CRYPTO_CURVE_NISTP521
#define HITLS_CRYPTO_CURVE_NISTP521
#endif
#ifndef HITLS_CRYPTO_CURVE_BP256R1
#define HITLS_CRYPTO_CURVE_BP256R1
#endif
#ifndef HITLS_CRYPTO_CURVE_BP384R1
#define HITLS_CRYPTO_CURVE_BP384R1
#endif
#ifndef HITLS_CRYPTO_CURVE_BP512R1
#define HITLS_CRYPTO_CURVE_BP512R1
#endif
#ifndef HITLS_CRYPTO_CURVE_SM2
#define HITLS_CRYPTO_CURVE_SM2
#endif
#ifndef HITLS_CRYPTO_ECC_CHECK
#define HITLS_CRYPTO_ECC_CHECK
#endif
#endif
#if defined(HITLS_CRYPTO_CURVE_NISTP224) || defined(HITLS_CRYPTO_CURVE_NISTP256) || \
defined(HITLS_CRYPTO_CURVE_NISTP384) || defined(HITLS_CRYPTO_CURVE_NISTP521) || \
defined(HITLS_CRYPTO_CURVE_BP256R1) || defined(HITLS_CRYPTO_CURVE_BP384R1) || \
defined(HITLS_CRYPTO_CURVE_BP512R1) || defined(HITLS_CRYPTO_CURVE_SM2)
#ifndef HITLS_CRYPTO_ECC
#define HITLS_CRYPTO_ECC
#endif
#ifndef HITLS_CRYPTO_BN_RAND
#define HITLS_CRYPTO_BN_RAND
#endif
#endif
/* The NIST ECC acceleration flag is pointless without the ECC feature itself. */
#if defined(HITLS_CRYPTO_NIST_ECC_ACCELERATE) && !defined(HITLS_CRYPTO_ECC)
#undef HITLS_CRYPTO_NIST_ECC_ACCELERATE // Avoid turning on unnecessary functions.
#endif
/* The accelerated path requires a native 128-bit integer type; __SIZEOF_INT128__ == 16
 * indicates the compiler provides __int128 (GCC/Clang on 64-bit targets). */
#if defined(HITLS_CRYPTO_NIST_ECC_ACCELERATE) && defined(__SIZEOF_INT128__) && (__SIZEOF_INT128__ == 16)
#define HITLS_CRYPTO_NIST_USE_ACCEL
#endif
#ifdef HITLS_CRYPTO_DSA_GEN_PARA
#ifndef HITLS_CRYPTO_DSA
#define HITLS_CRYPTO_DSA
#endif
#endif
#ifdef HITLS_CRYPTO_ECDH
#ifndef HITLS_CRYPTO_ECDH_CHECK
#define HITLS_CRYPTO_ECDH_CHECK
#endif
#endif
#ifdef HITLS_CRYPTO_ECDSA
#ifndef HITLS_CRYPTO_ECDSA_CHECK
#define HITLS_CRYPTO_ECDSA_CHECK
#endif
#endif
#ifdef HITLS_CRYPTO_DH
#ifndef HITLS_CRYPTO_DH_CHECK
#define HITLS_CRYPTO_DH_CHECK
#endif
#endif
#ifdef HITLS_CRYPTO_DSA
#ifndef HITLS_CRYPTO_DSA_CHECK
#define HITLS_CRYPTO_DSA_CHECK
#endif
#endif
#if defined(HITLS_CRYPTO_DSA) || defined(HITLS_CRYPTO_CURVE25519) || defined(HITLS_CRYPTO_RSA) || \
defined(HITLS_CRYPTO_DH) || defined(HITLS_CRYPTO_ECDSA) || defined(HITLS_CRYPTO_ECDH) || \
defined(HITLS_CRYPTO_SM2) || defined(HITLS_CRYPTO_PAILLIER)|| defined(HITLS_CRYPTO_ELGAMAL) || \
defined(HITLS_CRYPTO_MLDSA) || defined(HITLS_CRYPTO_MLKEM) || defined(HITLS_CRYPTO_HYBRIDKEM) || \
defined(HITLS_CRYPTO_SLH_DSA) || defined(HITLS_CRYPTO_XMSS)
#ifndef HITLS_CRYPTO_PKEY
#define HITLS_CRYPTO_PKEY
#endif
#ifndef HITLS_BSL_PARAMS
#define HITLS_BSL_PARAMS
#endif
#endif
/* bn */
#ifdef HITLS_CRYPTO_BN
#ifndef HITLS_CRYPTO_BN_BASIC
#define HITLS_CRYPTO_BN_BASIC
#endif
#ifndef HITLS_CRYPTO_BN_RAND
#define HITLS_CRYPTO_BN_RAND
#endif
#ifndef HITLS_CRYPTO_EAL_BN
#define HITLS_CRYPTO_EAL_BN
#endif
#ifndef HITLS_CRYPTO_BN_PRIME
#define HITLS_CRYPTO_BN_PRIME
#endif
#ifndef HITLS_CRYPTO_BN_STR_CONV
#define HITLS_CRYPTO_BN_STR_CONV
#endif
#ifndef HITLS_CRYPTO_BN_CB
#define HITLS_CRYPTO_BN_CB
#endif
#ifndef HITLS_CRYPTO_BN_PRIME_RFC3526
#define HITLS_CRYPTO_BN_PRIME_RFC3526
#endif
#endif
/* BN (big number): prime support depends on BN random generation. */
#if defined(HITLS_CRYPTO_BN_PRIME) && !defined(HITLS_CRYPTO_BN_RAND)
#define HITLS_CRYPTO_BN_RAND
#endif
/* Any BN child feature re-enables the parent HITLS_CRYPTO_BN macro. */
#if defined(HITLS_CRYPTO_BN_RAND) || defined(HITLS_CRYPTO_EAL_BN) || defined(HITLS_CRYPTO_BN_PRIME) || \
defined(HITLS_CRYPTO_BN_STR_CONV) || defined(HITLS_CRYPTO_BN_CB) || defined(HITLS_CRYPTO_BN_PRIME_RFC3526) || \
defined(HITLS_CRYPTO_BN_BASIC)
#ifndef HITLS_CRYPTO_BN
#define HITLS_CRYPTO_BN
#endif
#endif
/* MD */
/* The parent MD feature enables every digest algorithm. */
#ifdef HITLS_CRYPTO_MD
#ifndef HITLS_CRYPTO_MD5
#define HITLS_CRYPTO_MD5
#endif
#ifndef HITLS_CRYPTO_SM3
#define HITLS_CRYPTO_SM3
#endif
#ifndef HITLS_CRYPTO_SHA1
#define HITLS_CRYPTO_SHA1
#endif
#ifndef HITLS_CRYPTO_SHA2
#define HITLS_CRYPTO_SHA2
#endif
#ifndef HITLS_CRYPTO_SHA3
#define HITLS_CRYPTO_SHA3
#endif
#endif
/* SHA2 family: the parent enables all four variants ... */
#ifdef HITLS_CRYPTO_SHA2
#ifndef HITLS_CRYPTO_SHA224
#define HITLS_CRYPTO_SHA224
#endif
#ifndef HITLS_CRYPTO_SHA256
#define HITLS_CRYPTO_SHA256
#endif
#ifndef HITLS_CRYPTO_SHA384
#define HITLS_CRYPTO_SHA384
#endif
#ifndef HITLS_CRYPTO_SHA512
#define HITLS_CRYPTO_SHA512
#endif
#endif
/* ... truncated variants depend on their full-width implementation ... */
#if defined(HITLS_CRYPTO_SHA224) && !defined(HITLS_CRYPTO_SHA256)
#define HITLS_CRYPTO_SHA256
#endif
#if defined(HITLS_CRYPTO_SHA384) && !defined(HITLS_CRYPTO_SHA512)
#define HITLS_CRYPTO_SHA512
#endif
/* ... and any variant re-enables the SHA2 parent. */
#if defined(HITLS_CRYPTO_SHA256) || defined(HITLS_CRYPTO_SHA512)
#ifndef HITLS_CRYPTO_SHA2
#define HITLS_CRYPTO_SHA2
#endif
#endif
/* Any digest re-enables the parent MD macro that gates the high-level MD interfaces. */
#if defined(HITLS_CRYPTO_MD5) || defined(HITLS_CRYPTO_SM3) || defined(HITLS_CRYPTO_SHA1) || \
defined(HITLS_CRYPTO_SHA2) || defined(HITLS_CRYPTO_SHA3)
#ifndef HITLS_CRYPTO_MD
#define HITLS_CRYPTO_MD
#endif
#endif
/* Assembling Macros */
/* Per-architecture (x86-64 / ARMv8) switches collapse into a generic *_ASM switch. */
#if defined(HITLS_CRYPTO_AES_X8664) || defined(HITLS_CRYPTO_AES_ARMV8)
#define HITLS_CRYPTO_AES_ASM
#endif
#if defined(HITLS_CRYPTO_CHACHA20_X8664) || defined(HITLS_CRYPTO_CHACHA20_ARMV8)
#define HITLS_CRYPTO_CHACHA20_ASM
#endif
#if defined(HITLS_CRYPTO_SM4_X8664) || defined(HITLS_CRYPTO_SM4_ARMV8)
#define HITLS_CRYPTO_SM4_ASM
#endif
/* MODES_* umbrella switches fan out to the individual mode implementations first. */
#if defined(HITLS_CRYPTO_MODES_X8664)
#define HITLS_CRYPTO_CHACHA20POLY1305_X8664
#define HITLS_CRYPTO_GCM_X8664
#endif
#if defined(HITLS_CRYPTO_MODES_ARMV8)
#define HITLS_CRYPTO_CHACHA20POLY1305_ARMV8
#define HITLS_CRYPTO_GCM_ARMV8
#endif
#if defined(HITLS_CRYPTO_MODES_X8664) || defined(HITLS_CRYPTO_MODES_ARMV8)
#define HITLS_CRYPTO_MODES_ASM
#endif
#if defined(HITLS_CRYPTO_CHACHA20POLY1305_X8664) || defined(HITLS_CRYPTO_CHACHA20POLY1305_ARMV8)
#define HITLS_CRYPTO_CHACHA20POLY1305_ASM
#endif
#if defined(HITLS_CRYPTO_GCM_X8664) || defined(HITLS_CRYPTO_GCM_ARMV8)
#define HITLS_CRYPTO_GCM_ASM
#endif
#if defined(HITLS_CRYPTO_MD5_X8664)
#define HITLS_CRYPTO_MD5_ASM
#endif
#if defined(HITLS_CRYPTO_SHA1_X8664) || defined(HITLS_CRYPTO_SHA1_ARMV8)
#define HITLS_CRYPTO_SHA1_ASM
#endif
/* Any SHA2-family per-arch switch enables the shared SHA2 assembly path. */
#if defined(HITLS_CRYPTO_SHA224_X8664) || defined(HITLS_CRYPTO_SHA224_ARMV8) || \
defined(HITLS_CRYPTO_SHA256_X8664) || defined(HITLS_CRYPTO_SHA256_ARMV8) || \
defined(HITLS_CRYPTO_SHA384_X8664) || defined(HITLS_CRYPTO_SHA384_ARMV8) || \
defined(HITLS_CRYPTO_SHA512_X8664) || defined(HITLS_CRYPTO_SHA512_ARMV8) || \
defined(HITLS_CRYPTO_SHA2_X8664) || defined(HITLS_CRYPTO_SHA2_ARMV8)
#define HITLS_CRYPTO_SHA2_ASM
#endif
#if defined(HITLS_CRYPTO_SM3_X8664) || defined(HITLS_CRYPTO_SM3_ARMV8)
#define HITLS_CRYPTO_SM3_ASM
#endif
#if defined(HITLS_CRYPTO_BN_X8664) || defined(HITLS_CRYPTO_BN_ARMV8)
#define HITLS_CRYPTO_BN_ASM
#endif
/* ECC umbrella switches fan out to the per-curve assembly implementations. */
#if defined(HITLS_CRYPTO_ECC_X8664)
#define HITLS_CRYPTO_CURVE_NISTP256_X8664
#define HITLS_CRYPTO_CURVE_SM2_X8664
#endif
#if defined(HITLS_CRYPTO_ECC_ARMV8)
#define HITLS_CRYPTO_CURVE_NISTP256_ARMV8
#define HITLS_CRYPTO_CURVE_SM2_ARMV8
#endif
#if defined(HITLS_CRYPTO_ECC_X8664) || defined(HITLS_CRYPTO_ECC_ARMV8)
#define HITLS_CRYPTO_ECC_ASM
#endif
#if defined(HITLS_CRYPTO_CURVE_NISTP256_X8664) || defined(HITLS_CRYPTO_CURVE_NISTP256_ARMV8)
#define HITLS_CRYPTO_CURVE_NISTP256_ASM
#endif
#if defined(HITLS_CRYPTO_CURVE_NISTP384_X8664) || defined(HITLS_CRYPTO_CURVE_NISTP384_ARMV8)
#define HITLS_CRYPTO_CURVE_NISTP384_ASM
#endif
#if defined(HITLS_CRYPTO_CURVE_SM2_X8664) || defined(HITLS_CRYPTO_CURVE_SM2_ARMV8)
#define HITLS_CRYPTO_CURVE_SM2_ASM
#endif
/*
 * Montgomery fallback for prime curves: on non-64-bit targets, curves without an
 * assembly or accelerated path fall back to the generic Montgomery implementation.
 */
#if (!defined(HITLS_SIXTY_FOUR_BITS))
#if (((defined(HITLS_CRYPTO_CURVE_NISTP224) || defined(HITLS_CRYPTO_CURVE_NISTP521)) && \
!defined(HITLS_CRYPTO_NIST_USE_ACCEL)) || \
defined(HITLS_CRYPTO_CURVE_NISTP384) || \
(defined(HITLS_CRYPTO_CURVE_NISTP256) && !defined(HITLS_CRYPTO_CURVE_NISTP256_ASM) && \
(!defined(HITLS_CRYPTO_NIST_ECC_ACCELERATE)) && (!defined(HITLS_CRYPTO_NIST_USE_ACCEL))) || \
(defined(HITLS_CRYPTO_CURVE_SM2) && !defined(HITLS_CRYPTO_CURVE_SM2_ASM)))
#define HITLS_CRYPTO_CURVE_MONT_NIST
#endif
#endif
/* Brainpool curves always use the generic Montgomery prime-field path. */
#if defined(HITLS_CRYPTO_CURVE_BP256R1) || defined(HITLS_CRYPTO_CURVE_BP384R1) || \
defined(HITLS_CRYPTO_CURVE_BP512R1)
#define HITLS_CRYPTO_CURVE_MONT_PRIME
#endif
#if defined(HITLS_CRYPTO_CURVE_MONT_PRIME) || defined(HITLS_CRYPTO_CURVE_MONT_NIST)
#define HITLS_CRYPTO_CURVE_MONT
#endif
/* Any key-check feature (ECDSA/ECDH/SM2) enables the shared ECC check code. */
#if defined(HITLS_CRYPTO_ECDSA_CHECK) || defined(HITLS_CRYPTO_ECDH_CHECK) || defined(HITLS_CRYPTO_SM2_CHECK)
#ifndef HITLS_CRYPTO_ECC_CHECK
#define HITLS_CRYPTO_ECC_CHECK
#endif
#endif
#endif /* HITLS_CONFIG_LAYER_CRYPTO_H */
| 2302_82127028/openHiTLS-examples_1508 | config/macro_config/hitls_config_layer_crypto.h | C | unknown | 24,120 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef HITLS_CONFIG_LAYER_PKI_H
#define HITLS_CONFIG_LAYER_PKI_H
/*
 * PKI feature derivation: parent features fan out to GEN/PARSE children,
 * children re-enable their parent, and GEN/PARSE pull in the crypto
 * key-encode/decode support they require.
 */
/* PKCS#12 parent enables both generation and parsing. */
#ifdef HITLS_PKI_PKCS12
#ifndef HITLS_PKI_PKCS12_GEN
#define HITLS_PKI_PKCS12_GEN
#endif
#ifndef HITLS_PKI_PKCS12_PARSE
#define HITLS_PKI_PKCS12_PARSE
#endif
#endif
/* PKCS#12 generation needs certificate gen/parse and key encoding. */
#ifdef HITLS_PKI_PKCS12_GEN
#ifndef HITLS_PKI_X509_CRT_GEN
#define HITLS_PKI_X509_CRT_GEN
#endif
#ifndef HITLS_PKI_X509_CRT_PARSE
#define HITLS_PKI_X509_CRT_PARSE
#endif
#ifndef HITLS_CRYPTO_KEY_ENCODE
#define HITLS_CRYPTO_KEY_ENCODE
#endif
#endif
/* PKCS#12 parsing needs certificate parsing and key decoding. */
#ifdef HITLS_PKI_PKCS12_PARSE
#ifndef HITLS_PKI_X509_CRT_PARSE
#define HITLS_PKI_X509_CRT_PARSE
#endif
#ifndef HITLS_CRYPTO_KEY_DECODE
#define HITLS_CRYPTO_KEY_DECODE
#endif
#endif
/* Either PKCS#12 child re-enables the parent and the encrypted-PKI key support. */
#if defined(HITLS_PKI_PKCS12_GEN) || defined(HITLS_PKI_PKCS12_PARSE)
#ifndef HITLS_PKI_PKCS12
#define HITLS_PKI_PKCS12
#endif
#ifndef HITLS_CRYPTO_KEY_EPKI
#define HITLS_CRYPTO_KEY_EPKI
#endif
#endif
/* X.509 parent enables certificate, CSR, CRL, and verification features. */
#ifdef HITLS_PKI_X509
#ifndef HITLS_PKI_X509_CRT
#define HITLS_PKI_X509_CRT
#endif
#ifndef HITLS_PKI_X509_CSR
#define HITLS_PKI_X509_CSR
#endif
#ifndef HITLS_PKI_X509_CRL
#define HITLS_PKI_X509_CRL
#endif
#ifndef HITLS_PKI_X509_VFY
#define HITLS_PKI_X509_VFY
#endif
#endif
/* Chain verification needs to parse certificates and CRLs. */
#ifdef HITLS_PKI_X509_VFY
#ifndef HITLS_PKI_X509_CRT_PARSE
#define HITLS_PKI_X509_CRT_PARSE
#endif
#ifndef HITLS_PKI_X509_CRL_PARSE
#define HITLS_PKI_X509_CRL_PARSE
#endif
#endif
/* CRT parent <-> GEN/PARSE children. */
#ifdef HITLS_PKI_X509_CRT
#ifndef HITLS_PKI_X509_CRT_GEN
#define HITLS_PKI_X509_CRT_GEN
#endif
#ifndef HITLS_PKI_X509_CRT_PARSE
#define HITLS_PKI_X509_CRT_PARSE
#endif
#endif
#if defined(HITLS_PKI_X509_CRT_GEN) || defined(HITLS_PKI_X509_CRT_PARSE)
#ifndef HITLS_PKI_X509_CRT
#define HITLS_PKI_X509_CRT
#endif
#endif
/* CSR parent <-> GEN/PARSE children. */
#ifdef HITLS_PKI_X509_CSR
#ifndef HITLS_PKI_X509_CSR_GEN
#define HITLS_PKI_X509_CSR_GEN
#endif
#ifndef HITLS_PKI_X509_CSR_PARSE
#define HITLS_PKI_X509_CSR_PARSE
#endif
#endif
#if defined(HITLS_PKI_X509_CSR_GEN) || defined(HITLS_PKI_X509_CSR_PARSE)
#ifndef HITLS_PKI_X509_CSR
#define HITLS_PKI_X509_CSR
#endif
#endif
/* CRL parent <-> GEN/PARSE children. */
#ifdef HITLS_PKI_X509_CRL
#ifndef HITLS_PKI_X509_CRL_GEN
#define HITLS_PKI_X509_CRL_GEN
#endif
#ifndef HITLS_PKI_X509_CRL_PARSE
#define HITLS_PKI_X509_CRL_PARSE
#endif
#endif
#if defined(HITLS_PKI_X509_CRL_GEN) || defined(HITLS_PKI_X509_CRL_PARSE)
#ifndef HITLS_PKI_X509_CRL
#define HITLS_PKI_X509_CRL
#endif
#endif
/* Any X.509 sub-feature re-enables the X.509 parent. */
#if defined(HITLS_PKI_X509_CRT) || defined(HITLS_PKI_X509_CSR) || defined(HITLS_PKI_X509_CRL) || \
defined(HITLS_PKI_X509_VFY)
#ifndef HITLS_PKI_X509
#define HITLS_PKI_X509
#endif
#endif
/* Every *_GEN feature needs key encoding; every *_PARSE feature needs key decoding. */
#if defined(HITLS_PKI_X509_CRT_GEN) || defined(HITLS_PKI_X509_CSR_GEN) || defined(HITLS_PKI_X509_CRL_GEN) || \
defined(HITLS_PKI_PKCS12_GEN)
#ifndef HITLS_CRYPTO_KEY_ENCODE
#define HITLS_CRYPTO_KEY_ENCODE
#endif
#endif
#if defined(HITLS_PKI_X509_CRT_PARSE) || defined(HITLS_PKI_X509_CSR_PARSE) || defined(HITLS_PKI_X509_CRL_PARSE) || \
defined(HITLS_PKI_PKCS12_PARSE)
#ifndef HITLS_CRYPTO_KEY_DECODE
#define HITLS_CRYPTO_KEY_DECODE
#endif
#endif
/* Human-readable PKI info output uses the BSL UIO abstraction. */
#ifdef HITLS_PKI_INFO
#ifndef HITLS_BSL_UIO_PLT
#define HITLS_BSL_UIO_PLT
#endif
#endif
// Common dependencies
/* All PKI features rely on the BSL list, OID and ASN.1 modules unconditionally. */
#ifndef HITLS_BSL_LIST
#define HITLS_BSL_LIST
#endif
#ifndef HITLS_BSL_OBJ
#define HITLS_BSL_OBJ
#endif
#ifndef HITLS_BSL_ASN1
#define HITLS_BSL_ASN1
#endif
#endif /* HITLS_CONFIG_LAYER_PKI_H */
| 2302_82127028/openHiTLS-examples_1508 | config/macro_config/hitls_config_layer_pki.h | C | unknown | 4,233 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/* Derivation of configuration features.
* The derivation type (rule) and sequence are as follows:
* 1. Parent features derive child features.
* 2. Derive the features of dependencies.
* For example, if feature a depends on features b and c, you need to derive features b and c.
* 3. Child features derive parent features.
* The high-level interfaces of the crypto module is controlled by the parent feature macro,
* if there is no parent feature, such interfaces will be unavailable.
*/
#ifndef HITLS_CONFIG_LAYER_TLS_H
#define HITLS_CONFIG_LAYER_TLS_H
// version
#ifdef HITLS_TLS_PROTO_VERSION
#ifndef HITLS_TLS_PROTO_TLS12
#define HITLS_TLS_PROTO_TLS12
#endif
#ifndef HITLS_TLS_PROTO_TLS13
#define HITLS_TLS_PROTO_TLS13
#endif
#ifndef HITLS_TLS_PROTO_TLCP11
#define HITLS_TLS_PROTO_TLCP11
#endif
#ifndef HITLS_TLS_PROTO_DTLCP11
#define HITLS_TLS_PROTO_DTLCP11
#endif
#ifndef HITLS_TLS_PROTO_DTLS12
#define HITLS_TLS_PROTO_DTLS12
#endif
#endif
#if defined(HITLS_TLS_PROTO_DTLCP11)
#ifndef HITLS_TLS_PROTO_DTLS12
#define HITLS_TLS_PROTO_DTLS12
#endif
#ifndef HITLS_TLS_PROTO_TLCP11
#define HITLS_TLS_PROTO_TLCP11
#endif
#endif
#if defined(HITLS_TLS_PROTO_TLS12) || defined(HITLS_TLS_PROTO_TLS13) || defined(HITLS_TLS_PROTO_TLCP11)
#ifndef HITLS_TLS_PROTO_TLS
#define HITLS_TLS_PROTO_TLS
#endif
#endif
#if defined(HITLS_TLS_PROTO_TLS12) || defined(HITLS_TLS_PROTO_TLCP11)
#ifndef HITLS_TLS_PROTO_TLS_BASIC
#define HITLS_TLS_PROTO_TLS_BASIC
#endif
#endif
#if defined(HITLS_TLS_PROTO_DTLS12)
#ifndef HITLS_TLS_PROTO_DTLS
#define HITLS_TLS_PROTO_DTLS
#endif
#endif
#if defined(HITLS_TLS_PROTO_TLS12) && defined(HITLS_TLS_PROTO_TLS13)
#ifndef HITLS_TLS_PROTO_ALL
#define HITLS_TLS_PROTO_ALL
#endif
#endif
// host
#ifdef HITLS_TLS_HOST
#ifndef HITLS_TLS_HOST_SERVER
#define HITLS_TLS_HOST_SERVER
#endif
#ifndef HITLS_TLS_HOST_CLIENT
#define HITLS_TLS_HOST_CLIENT
#endif
#endif
#if defined(HITLS_TLS_HOST_SERVER) || defined(HITLS_TLS_HOST_CLIENT)
#ifndef HITLS_TLS_HOST
#define HITLS_TLS_HOST
#endif
#endif
// callback
#ifdef HITLS_TLS_CALLBACK
#ifndef HITLS_TLS_FEATURE_PROVIDER
#define HITLS_TLS_FEATURE_PROVIDER
#endif
#ifndef HITLS_TLS_CALLBACK_SAL
#define HITLS_TLS_CALLBACK_SAL
#endif
#ifndef HITLS_TLS_CALLBACK_CERT
#define HITLS_TLS_CALLBACK_CERT
#endif
#ifndef HITLS_TLS_CALLBACK_CRYPT
#define HITLS_TLS_CALLBACK_CRYPT
#endif
#endif
#if defined(HITLS_TLS_FEATURE_PROVIDER)
#ifdef HITLS_TLS_CALLBACK_SAL
#undef HITLS_TLS_CALLBACK_SAL
#endif
#ifdef HITLS_TLS_CALLBACK_CERT
#undef HITLS_TLS_CALLBACK_CERT
#endif
#ifdef HITLS_TLS_CALLBACK_CRYPT
#undef HITLS_TLS_CALLBACK_CRYPT
#endif
#endif
#if defined(HITLS_TLS_CALLBACK_CERT) || defined(HITLS_TLS_CALLBACK_CRYPT)
#ifndef HITLS_TLS_CALLBACK_SAL
#define HITLS_TLS_CALLBACK_SAL
#endif
#endif
#ifdef HITLS_TLS_FEATURE_PROVIDER
#ifndef HITLS_BSL_HASH
#define HITLS_BSL_HASH
#endif
#endif
#if !defined(HITLS_TLS_FEATURE_PROVIDER) && !defined(HITLS_TLS_CALLBACK_SAL)
#define HITLS_TLS_FEATURE_PROVIDER
#endif
// feature
/* The feature parent enables every optional TLS feature. */
#ifdef HITLS_TLS_FEATURE
#ifndef HITLS_TLS_FEATURE_RENEGOTIATION
#define HITLS_TLS_FEATURE_RENEGOTIATION
#endif
#ifndef HITLS_TLS_FEATURE_ALPN
#define HITLS_TLS_FEATURE_ALPN
#endif
#ifndef HITLS_TLS_FEATURE_SNI
#define HITLS_TLS_FEATURE_SNI
#endif
#ifndef HITLS_TLS_FEATURE_PHA
#define HITLS_TLS_FEATURE_PHA
#endif
#ifndef HITLS_TLS_FEATURE_PSK
#define HITLS_TLS_FEATURE_PSK
#endif
#ifndef HITLS_TLS_FEATURE_SECURITY
#define HITLS_TLS_FEATURE_SECURITY
#endif
#ifndef HITLS_TLS_FEATURE_INDICATOR
#define HITLS_TLS_FEATURE_INDICATOR
#endif
#ifndef HITLS_TLS_FEATURE_SESSION
#define HITLS_TLS_FEATURE_SESSION
#endif
#ifndef HITLS_TLS_FEATURE_KEY_UPDATE
#define HITLS_TLS_FEATURE_KEY_UPDATE
#endif
#ifndef HITLS_TLS_FEATURE_FLIGHT
#define HITLS_TLS_FEATURE_FLIGHT
#endif
#ifndef HITLS_TLS_FEATURE_CERT_MODE
#define HITLS_TLS_FEATURE_CERT_MODE
#endif
#ifndef HITLS_TLS_FEATURE_MODE
#define HITLS_TLS_FEATURE_MODE
#endif
#ifndef HITLS_TLS_FEATURE_KEM
#define HITLS_TLS_FEATURE_KEM
#endif
#ifndef HITLS_TLS_FEATURE_CLIENT_HELLO_CB
#define HITLS_TLS_FEATURE_CLIENT_HELLO_CB
#endif
#ifndef HITLS_TLS_FEATURE_CERT_CB
#define HITLS_TLS_FEATURE_CERT_CB
#endif
#ifndef HITLS_TLS_FEATURE_MAX_SEND_FRAGMENT
#define HITLS_TLS_FEATURE_MAX_SEND_FRAGMENT
#endif
#ifndef HITLS_TLS_FEATURE_REC_INBUFFER_SIZE
#define HITLS_TLS_FEATURE_REC_INBUFFER_SIZE
#endif
#ifndef HITLS_TLS_FEATURE_CUSTOM_EXTENSION
#define HITLS_TLS_FEATURE_CUSTOM_EXTENSION
#endif
#ifndef HITLS_TLS_FEATURE_CERTIFICATE_AUTHORITIES
#define HITLS_TLS_FEATURE_CERTIFICATE_AUTHORITIES
#endif
#endif /* HITLS_TLS_FEATURE */
/* Session parent <-> ticket/ID children. */
#ifdef HITLS_TLS_FEATURE_SESSION
#ifndef HITLS_TLS_FEATURE_SESSION_TICKET
#define HITLS_TLS_FEATURE_SESSION_TICKET
#endif
#ifndef HITLS_TLS_FEATURE_SESSION_ID
#define HITLS_TLS_FEATURE_SESSION_ID
#endif
#endif
/* Mode parent <-> individual mode flags. */
#ifdef HITLS_TLS_FEATURE_MODE
#ifndef HITLS_TLS_FEATURE_MODE_FALL_BACK_SCSV
#define HITLS_TLS_FEATURE_MODE_FALL_BACK_SCSV
#endif
#ifndef HITLS_TLS_FEATURE_MODE_AUTO_RETRY
#define HITLS_TLS_FEATURE_MODE_AUTO_RETRY
#endif
#ifndef HITLS_TLS_FEATURE_MODE_ACCEPT_MOVING_WRITE_BUFFER
#define HITLS_TLS_FEATURE_MODE_ACCEPT_MOVING_WRITE_BUFFER
#endif
#ifndef HITLS_TLS_FEATURE_MODE_RELEASE_BUFFERS
#define HITLS_TLS_FEATURE_MODE_RELEASE_BUFFERS
#endif
#endif
#if defined(HITLS_TLS_FEATURE_MODE_FALL_BACK_SCSV) || defined(HITLS_TLS_FEATURE_MODE_AUTO_RETRY) || \
defined(HITLS_TLS_FEATURE_MODE_ACCEPT_MOVING_WRITE_BUFFER) || defined(HITLS_TLS_FEATURE_MODE_RELEASE_BUFFERS)
#ifndef HITLS_TLS_FEATURE_MODE
#define HITLS_TLS_FEATURE_MODE
#endif
#endif
#if defined(HITLS_TLS_FEATURE_SESSION_TICKET) || defined(HITLS_TLS_FEATURE_SESSION_ID)
#ifndef HITLS_TLS_FEATURE_SESSION
#define HITLS_TLS_FEATURE_SESSION
#endif
#endif
/* The security-level feature operates on the configured cipher-suite list. */
#ifdef HITLS_TLS_FEATURE_SECURITY
#ifndef HITLS_TLS_CONFIG_CIPHER_SUITE
#define HITLS_TLS_CONFIG_CIPHER_SUITE
#endif
#endif
// proto
/* The protocol engine depends on BSL TLV parsing, the SAL, and the crypto EAL. */
#ifdef HITLS_TLS_PROTO
#ifndef HITLS_BSL_TLV
#define HITLS_BSL_TLV
#endif
#ifndef HITLS_BSL_SAL
#define HITLS_BSL_SAL
#endif
#ifndef HITLS_CRYPTO_EAL
#define HITLS_CRYPTO_EAL
#endif
#endif
// suite_cipher
/* Cipher-class parent enables both AEAD and CBC suite families. */
#ifdef HITLS_TLS_SUITE_CIPHER
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#endif
// KX
/* Key-exchange parent enables every key-exchange method. */
#ifdef HITLS_TLS_SUITE_KX
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDH
#define HITLS_TLS_SUITE_KX_ECDH
#endif
#ifndef HITLS_TLS_SUITE_KX_DH
#define HITLS_TLS_SUITE_KX_DH
#endif
#ifndef HITLS_TLS_SUITE_KX_RSA
#define HITLS_TLS_SUITE_KX_RSA
#endif
#endif
// AUTH
/* Authentication parent enables every authentication method. */
#ifdef HITLS_TLS_SUITE_AUTH
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#ifndef HITLS_TLS_SUITE_AUTH_ECDSA
#define HITLS_TLS_SUITE_AUTH_ECDSA
#endif
#ifndef HITLS_TLS_SUITE_AUTH_DSS
#define HITLS_TLS_SUITE_AUTH_DSS
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#ifndef HITLS_TLS_SUITE_AUTH_SM2
#define HITLS_TLS_SUITE_AUTH_SM2
#endif
#endif
// MAINTAIN
#ifdef HITLS_TLS_MAINTAIN
#ifndef HITLS_TLS_MAINTAIN_KEYLOG
#define HITLS_TLS_MAINTAIN_KEYLOG
#endif
#endif
/* Config parent enables every configuration sub-feature. */
#ifdef HITLS_TLS_CONFIG
#ifndef HITLS_TLS_CONFIG_MANUAL_DH
#define HITLS_TLS_CONFIG_MANUAL_DH
#endif
#ifndef HITLS_TLS_CONFIG_CERT
#define HITLS_TLS_CONFIG_CERT
#endif
#ifndef HITLS_TLS_CONFIG_KEY_USAGE
#define HITLS_TLS_CONFIG_KEY_USAGE
#endif
#ifndef HITLS_TLS_CONFIG_INFO
#define HITLS_TLS_CONFIG_INFO
#endif
#ifndef HITLS_TLS_CONFIG_STATE
#define HITLS_TLS_CONFIG_STATE
#endif
#ifndef HITLS_TLS_CONFIG_RECORD_PADDING
#define HITLS_TLS_CONFIG_RECORD_PADDING
#endif
#ifndef HITLS_TLS_CONFIG_USER_DATA
#define HITLS_TLS_CONFIG_USER_DATA
#endif
#ifndef HITLS_TLS_CONFIG_CIPHER_SUITE
#define HITLS_TLS_CONFIG_CIPHER_SUITE
#endif
#endif
/* Connection parent <-> negotiation-info child. */
#ifdef HITLS_TLS_CONNECTION
#ifndef HITLS_TLS_CONNECTION_INFO_NEGOTIATION
#define HITLS_TLS_CONNECTION_INFO_NEGOTIATION
#endif
#endif
#ifdef HITLS_TLS_CONNECTION_INFO_NEGOTIATION
#ifndef HITLS_TLS_CONNECTION
#define HITLS_TLS_CONNECTION
#endif
#endif
/* Certificate config enables file loading, callbacks and chain building. */
#ifdef HITLS_TLS_CONFIG_CERT
#ifndef HITLS_TLS_CONFIG_CERT_LOAD_FILE
#define HITLS_TLS_CONFIG_CERT_LOAD_FILE
#endif
#ifndef HITLS_TLS_CONFIG_CERT_CALLBACK
#define HITLS_TLS_CONFIG_CERT_CALLBACK
#endif
#ifndef HITLS_TLS_CONFIG_CERT_BUILD_CHAIN
#define HITLS_TLS_CONFIG_CERT_BUILD_CHAIN
#endif
#endif
/* Cookie extension support is needed by DTLS 1.2 and TLS 1.3 builds. */
#if defined(HITLS_TLS_PROTO_DTLS12) || defined(HITLS_TLS_PROTO_TLS13)
#ifndef HITLS_TLS_EXTENSION_COOKIE
#define HITLS_TLS_EXTENSION_COOKIE
#endif
#endif
/*
 * Cipher-suite enablement matrix: each (cipher class x key exchange x auth)
 * combination enables the concrete cipher-suite macros it supports. TLS 1.3
 * suites (version-independent AEAD suites) are additionally gated on
 * HITLS_TLS_PROTO_TLS13.
 */
/* AEAD + ECDHE + RSA auth */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_ECDHE) && defined(HITLS_TLS_SUITE_AUTH_RSA)
#if !defined(HITLS_TLS_SUITE_AES_128_GCM_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_GCM_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_256_GCM_SHA384) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_256_GCM_SHA384
#endif
#if !defined(HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_128_CCM_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_CCM_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_128_CCM_8_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_CCM_8_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
#define HITLS_TLS_SUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
#endif
#endif
/* AEAD + ECDHE + ECDSA auth */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_ECDHE) && defined(HITLS_TLS_SUITE_AUTH_ECDSA)
#if !defined(HITLS_TLS_SUITE_AES_128_GCM_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_GCM_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_256_GCM_SHA384) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_256_GCM_SHA384
#endif
#if !defined(HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_128_CCM_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_CCM_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_128_CCM_8_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_CCM_8_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CCM
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CCM
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CCM
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CCM
#endif
#endif
/* AEAD + ECDHE + PSK auth */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_ECDHE) && defined(HITLS_TLS_SUITE_AUTH_PSK)
#if !defined(HITLS_TLS_SUITE_AES_128_GCM_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_GCM_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_256_GCM_SHA384) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_256_GCM_SHA384
#endif
#if !defined(HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_128_CCM_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_CCM_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_128_CCM_8_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_CCM_8_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256
#define HITLS_TLS_SUITE_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CCM_SHA256
#define HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_GCM_SHA384
#endif
#endif
/* AEAD + DHE + RSA auth */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_DHE) && defined(HITLS_TLS_SUITE_AUTH_RSA)
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_GCM_SHA384
#endif
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256
#define HITLS_TLS_SUITE_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CCM
#define HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CCM
#endif
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CCM
#define HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CCM
#endif
#endif
/* AEAD + DHE + DSS auth */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_DHE) && defined(HITLS_TLS_SUITE_AUTH_DSS)
#ifndef HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_GCM_SHA384
#endif
#endif
/* AEAD + DHE + PSK auth */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_DHE) && defined(HITLS_TLS_SUITE_AUTH_PSK)
#if !defined(HITLS_TLS_SUITE_AES_128_GCM_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_GCM_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_256_GCM_SHA384) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_256_GCM_SHA384
#endif
#if !defined(HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_128_CCM_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_CCM_SHA256
#endif
#if !defined(HITLS_TLS_SUITE_AES_128_CCM_8_SHA256) && defined(HITLS_TLS_PROTO_TLS13)
#define HITLS_TLS_SUITE_AES_128_CCM_8_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_GCM_SHA384
#endif
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CCM
#define HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CCM
#endif
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CCM
#define HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CCM
#endif
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256
#define HITLS_TLS_SUITE_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256
#endif
#endif
/*
 * AEAD + RSA kx + RSA auth.
 * NOTE(review): this branch also enables RSA_PSK_* suites without requiring
 * HITLS_TLS_SUITE_AUTH_PSK; the same suites are enabled again in the
 * AUTH_PSK branch below — confirm the duplication here is intentional.
 */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_RSA) && defined(HITLS_TLS_SUITE_AUTH_RSA)
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_RSA_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_RSA_WITH_AES_256_GCM_SHA384
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_GCM_SHA384
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256
#define HITLS_TLS_SUITE_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256
#endif
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM
#define HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM
#endif
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM_8
#define HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM_8
#endif
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM
#define HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM
#endif
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM_8
#define HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM_8
#endif
#endif
/* AEAD + RSA kx + PSK auth */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_RSA) && defined(HITLS_TLS_SUITE_AUTH_PSK)
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_GCM_SHA384
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256
#define HITLS_TLS_SUITE_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256
#endif
#endif
/* AEAD + ECDHE + SM2 auth (TLCP) */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_ECDHE) && defined(HITLS_TLS_SUITE_AUTH_SM2)
#ifndef HITLS_TLS_SUITE_ECDHE_SM4_GCM_SM3
#define HITLS_TLS_SUITE_ECDHE_SM4_GCM_SM3
#endif
#endif
/* CBC + ECDHE + RSA auth */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_ECDHE) && defined(HITLS_TLS_SUITE_AUTH_RSA)
#ifndef HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA384
#define HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA384
#endif
#endif
/* CBC + ECDHE + ECDSA auth */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_ECDHE) && defined(HITLS_TLS_SUITE_AUTH_ECDSA)
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
#define HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
#endif
#endif
/* CBC + ECDHE + PSK auth */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_ECDHE) && defined(HITLS_TLS_SUITE_AUTH_PSK)
#ifndef HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA384
#define HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA384
#endif
#endif
/* CBC + ECDHE + SM2 auth (TLCP) */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_ECDHE) && defined(HITLS_TLS_SUITE_AUTH_SM2)
#ifndef HITLS_TLS_SUITE_ECDHE_SM4_CBC_SM3
#define HITLS_TLS_SUITE_ECDHE_SM4_CBC_SM3
#endif
#endif
/* CBC + DHE + RSA auth */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_DHE) && defined(HITLS_TLS_SUITE_AUTH_RSA)
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA256
#define HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA256
#endif
#endif
/* CBC + DHE + DSS auth */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_DHE) && defined(HITLS_TLS_SUITE_AUTH_DSS)
#ifndef HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA256
#define HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA256
#endif
#endif
/* CBC + DHE + PSK auth */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_DHE) && defined(HITLS_TLS_SUITE_AUTH_PSK)
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA384
#define HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA384
#endif
#endif
/*
 * CBC + RSA kx + RSA auth.
 * NOTE(review): as with the AEAD branch above, RSA_PSK_* CBC suites are also
 * enabled here without requiring AUTH_PSK — confirm intentional.
 */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_RSA) && defined(HITLS_TLS_SUITE_AUTH_RSA)
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA256
#define HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA384
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA384
#endif
#endif
/* CBC + RSA kx + PSK auth */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_RSA) && defined(HITLS_TLS_SUITE_AUTH_PSK)
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA384
#define HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA384
#endif
#endif
/* Anonymous (no-auth) suites: AEAD/CBC + DHE or ECDHE with no AUTH requirement. */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_KX_DHE)
#ifndef HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_GCM_SHA384
#endif
#endif
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_ECDHE)
#ifndef HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_256_CBC_SHA
#endif
#endif
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_KX_DHE)
#ifndef HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA256
#define HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA256
#endif
#endif
/* Plain PSK suites (PSK key exchange, no separate KX requirement). */
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_AUTH_PSK)
#ifndef HITLS_TLS_SUITE_PSK_WITH_AES_128_GCM_SHA256
#define HITLS_TLS_SUITE_PSK_WITH_AES_128_GCM_SHA256
#endif
#ifndef HITLS_TLS_SUITE_PSK_WITH_AES_256_GCM_SHA384
#define HITLS_TLS_SUITE_PSK_WITH_AES_256_GCM_SHA384
#endif
#ifndef HITLS_TLS_SUITE_PSK_WITH_AES_256_CCM
#define HITLS_TLS_SUITE_PSK_WITH_AES_256_CCM
#endif
#ifndef HITLS_TLS_SUITE_PSK_WITH_CHACHA20_POLY1305_SHA256
#define HITLS_TLS_SUITE_PSK_WITH_CHACHA20_POLY1305_SHA256
#endif
#endif
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_AUTH_PSK)
#ifndef HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA
#define HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA
#define HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA
#endif
#ifndef HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA256
#define HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA256
#endif
#ifndef HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA384
#define HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA384
#endif
#endif
/* TLCP ECC (SM2) suites. */
#if defined(HITLS_TLS_SUITE_CIPHER_CBC) && defined(HITLS_TLS_SUITE_AUTH_SM2)
#ifndef HITLS_TLS_SUITE_ECC_SM4_CBC_SM3
#define HITLS_TLS_SUITE_ECC_SM4_CBC_SM3
#endif
#endif
#if defined(HITLS_TLS_SUITE_CIPHER_AEAD) && defined(HITLS_TLS_SUITE_AUTH_SM2)
#ifndef HITLS_TLS_SUITE_ECC_SM4_GCM_SM3
#define HITLS_TLS_SUITE_ECC_SM4_GCM_SM3
#endif
#endif
/*
 * Reverse derivation: enabling an individual cipher suite re-enables the
 * cipher-class / key-exchange / auth switches it depends on.
 */
#if defined(HITLS_TLS_SUITE_AES_128_GCM_SHA256) || defined(HITLS_TLS_SUITE_AES_256_GCM_SHA384) || \
defined(HITLS_TLS_SUITE_CHACHA20_POLY1305_SHA256) || defined(HITLS_TLS_SUITE_AES_128_CCM_SHA256) || \
defined(HITLS_TLS_SUITE_AES_128_CCM_8_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA) || defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_CBC_SHA256) || defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_CBC_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_RSA
#define HITLS_TLS_SUITE_KX_RSA
#endif
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_GCM_SHA384) || \
defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM) || defined(HITLS_TLS_SUITE_RSA_WITH_AES_128_CCM_8) || \
defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM) || defined(HITLS_TLS_SUITE_RSA_WITH_AES_256_CCM_8)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_RSA
#define HITLS_TLS_SUITE_KX_RSA
#endif
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_GCM_SHA384) || \
defined(HITLS_TLS_SUITE_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256) || \
defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CCM) || defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CCM)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_ECDSA
#define HITLS_TLS_SUITE_AUTH_ECDSA
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384) || \
defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_128_CCM) || defined(HITLS_TLS_SUITE_ECDHE_ECDSA_WITH_AES_256_CCM)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_ECDSA
#define HITLS_TLS_SUITE_AUTH_ECDSA
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_CBC_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_CBC_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384) || \
defined(HITLS_TLS_SUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_GCM_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_DSS
#define HITLS_TLS_SUITE_AUTH_DSS
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_128_CBC_SHA256) || \
defined(HITLS_TLS_SUITE_DHE_DSS_WITH_AES_256_CBC_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_DSS
#define HITLS_TLS_SUITE_AUTH_DSS
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_128_CBC_SHA256) || \
defined(HITLS_TLS_SUITE_DHE_RSA_WITH_AES_256_CBC_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA) || defined(HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_PSK_WITH_AES_128_CBC_SHA256) || defined(HITLS_TLS_SUITE_PSK_WITH_AES_256_CBC_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CBC_SHA256) || \
defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CBC_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_CBC_SHA256) || \
defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_CBC_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_RSA
#define HITLS_TLS_SUITE_KX_RSA
#endif
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#endif
#if defined(HITLS_TLS_SUITE_PSK_WITH_AES_128_GCM_SHA256) || defined(HITLS_TLS_SUITE_PSK_WITH_AES_256_GCM_SHA384) || \
defined(HITLS_TLS_SUITE_PSK_WITH_AES_256_CCM) || defined(HITLS_TLS_SUITE_PSK_WITH_CHACHA20_POLY1305_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#endif
#if defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_GCM_SHA384) || defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_128_CCM) || \
defined(HITLS_TLS_SUITE_DHE_PSK_WITH_AES_256_CCM) || defined(HITLS_TLS_SUITE_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#endif
#if defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_RSA_PSK_WITH_AES_256_GCM_SHA384) || \
defined(HITLS_TLS_SUITE_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_RSA
#define HITLS_TLS_SUITE_KX_RSA
#endif
#ifndef HITLS_TLS_SUITE_AUTH_RSA
#define HITLS_TLS_SUITE_AUTH_RSA
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CBC_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_CBC_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_CCM_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_ECDHE_PSK_WITH_AES_256_GCM_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_PSK
#define HITLS_TLS_SUITE_AUTH_PSK
#endif
#endif
#if defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA) || \
defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_CBC_SHA256) || \
defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_CBC_SHA256)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#endif
#if defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_128_GCM_SHA256) || \
defined(HITLS_TLS_SUITE_DH_ANON_WITH_AES_256_GCM_SHA384)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_DHE
#define HITLS_TLS_SUITE_KX_DHE
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_128_CBC_SHA) || \
defined(HITLS_TLS_SUITE_ECDH_ANON_WITH_AES_256_CBC_SHA)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_SM4_CBC_SM3)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_SM2
#define HITLS_TLS_SUITE_AUTH_SM2
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECC_SM4_CBC_SM3)
#ifndef HITLS_TLS_SUITE_CIPHER_CBC
#define HITLS_TLS_SUITE_CIPHER_CBC
#endif
#ifndef HITLS_TLS_SUITE_AUTH_SM2
#define HITLS_TLS_SUITE_AUTH_SM2
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECDHE_SM4_GCM_SM3)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_KX_ECDHE
#define HITLS_TLS_SUITE_KX_ECDHE
#endif
#ifndef HITLS_TLS_SUITE_AUTH_SM2
#define HITLS_TLS_SUITE_AUTH_SM2
#endif
#endif
#if defined(HITLS_TLS_SUITE_ECC_SM4_GCM_SM3)
#ifndef HITLS_TLS_SUITE_CIPHER_AEAD
#define HITLS_TLS_SUITE_CIPHER_AEAD
#endif
#ifndef HITLS_TLS_SUITE_AUTH_SM2
#define HITLS_TLS_SUITE_AUTH_SM2
#endif
#endif
#ifdef HITLS_TLS_SUITE_CIPHER_CBC
#ifndef HITLS_TLS_CALLBACK_CRYPT_HMAC_PRIMITIVES
#define HITLS_TLS_CALLBACK_CRYPT_HMAC_PRIMITIVES
#endif
#endif
#ifdef HITLS_TLS_SUITE_CIPHER_CBC
#ifndef HITLS_TLS_FEATURE_ETM
#define HITLS_TLS_FEATURE_ETM
#endif
#endif
#if defined(HITLS_TLS_SUITE_AUTH_ECDSA) || defined(HITLS_TLS_SUITE_AUTH_RSA) || defined(HITLS_TLS_SUITE_AUTH_DSS) || \
defined(HITLS_TLS_SUITE_AUTH_PSK) || defined(HITLS_TLS_SUITE_AUTH_SM2)
#ifndef HITLS_TLS_SUITE_AUTH
#define HITLS_TLS_SUITE_AUTH
#endif
#endif
#endif /* HITLS_CONFIG_LAYER_TLS_H */
# CMake toolchain file for cross-compiling openHiTLS to bare-metal ARM
# with the GNU arm-none-eabi toolchain.
# "Generic" means no host operating system (bare-metal target).
set(CMAKE_SYSTEM_NAME Generic)
set(CMAKE_SYSTEM_PROCESSOR arm)
# Location and target-triplet prefix of the toolchain binaries.
set(TOOLCHAIN_PATH /usr/bin)
set(TOOLCHAIN_PREFIX arm-none-eabi)
set(CMAKE_C_COMPILER ${TOOLCHAIN_PATH}/${TOOLCHAIN_PREFIX}-gcc)
set(CMAKE_CXX_COMPILER ${TOOLCHAIN_PATH}/${TOOLCHAIN_PREFIX}-g++)
# Assembly sources are driven through gcc so the same flags apply.
set(CMAKE_ASM_COMPILER ${TOOLCHAIN_PATH}/${TOOLCHAIN_PREFIX}-gcc)
# Binutils used for archiving and post-processing the produced images.
set(CMAKE_AR ${TOOLCHAIN_PATH}/${TOOLCHAIN_PREFIX}-ar)
set(CMAKE_RANLIB ${TOOLCHAIN_PATH}/${TOOLCHAIN_PREFIX}-ranlib)
set(CMAKE_STRIP ${TOOLCHAIN_PATH}/${TOOLCHAIN_PREFIX}-strip)
set(CMAKE_OBJCOPY ${TOOLCHAIN_PATH}/${TOOLCHAIN_PREFIX}-objcopy)
set(CMAKE_OBJDUMP ${TOOLCHAIN_PATH}/${TOOLCHAIN_PREFIX}-objdump)
# Bare-metal targets cannot load shared objects: build static archives only.
set(BUILD_SHARED_LIBS OFF)
# Compile test programs as static libraries: bare-metal has no runtime to link a full executable against.
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the openHiTLS project.
#
# openHiTLS is licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
"""
Customize the openHiTLS build.
Generate the modules.cmake file based on command line arguments and configuration files.
Options usage and examples:
1 Enable the feature on demand and specify the implementation type of the feature, c or assembly.
# Use 'enable' to specify the features to be constructed.
# Compile C code if there is no other parameter.
./configure.py --enable all # Build all features of openHiTLS.
./configure.py --enable hitls_crypto # Build all features in the lib hitls_crypto.
./configure.py --enable md # Build all sub features of md.
./configure.py --enable sha2 sha3 hmac # Specifies to build certain features.
# Use 'enable' to specify the features to be constructed.
# Use 'asm_type' to specify the assembly type.
# If there are features in enable list that supports assembly, compile its assembly implementation.
./configure.py --enable sm3 aes ... --asm_type armv8
# Use 'enable' to specify the features to be constructed.
# Use 'asm_type' to specify the assembly type.
# Use 'asm' to specify the assembly feature(s), which is(are) based on the enabled features.
# Compile the assembly code of the features in the asm, and the C code of other features in the enable list.
./configure.py --enable sm3 aes ... --asm_type armv8 --asm sm3
2 Compile options: Add or delete compilation options based on the default compilation options (compile.json).
./configure.py --add_options "-O0 -g" --del_options "-O2 -D_FORTIFY_SOURCE=2"
3 Link options: Add or delete link options based on the default link options (compile.json).
./configure.py --add_link_flags "xxx xxx" --del_link_flags "xxx xxx"
4 Set the endian mode of the system. Set the endian mode of the system. The default value is little endian.
./configure.py --endian big
5 Specifies the system type.
./configure.py --system linux
6 Specifies the number of system bits.
./configure.py --bits 32
7 Generating modules.cmake
./configure.py -m
8 Specifies the directory where the compilation middleware is generated. The default directory is ./output.
./configure.py --build_dir build
9 Specifies the lib type.
./configure.py --lib_type static
./configure.py --lib_type static shared object
10 You can directly specify the compilation configuration files, omitting the above 1~9 command line parameters.
For the file format, please refer to the compile_config.json and feature_config.json files generated after executing
the above 1~9 commands.
./configure.py --feature_config path/to/xxx.json --compile_config path/to/xxx.json
Note:
Options for different functions can be combined.
"""
import sys
sys.dont_write_bytecode = True
import os
import argparse
import traceback
import glob
from script.methods import copy_file, save_json_file, trans2list
from script.config_parser import (FeatureParser, CompileParser, FeatureConfigParser,
CompileConfigParser, CompleteOptionParser)
# Absolute directory containing this script (the openHiTLS source root).
srcdir = os.path.dirname(os.path.realpath(sys.argv[0]))
# Directory from which the user invoked the script.
work_dir = os.path.abspath(os.getcwd())
def get_cfg_args():
    """Parse the configure.py command line and return an argparse.Namespace.

    In addition to the raw options, two derived paths are attached:
    ``tmp_feature_config`` and ``tmp_compile_config`` point at the JSON
    configuration files inside the build directory. The 'uio_sctp' feature
    is disabled by default unless --enable-sctp or -m is given.

    Raises:
        ValueError: when argument registration/parsing fails.
    """
    parser = argparse.ArgumentParser(prog='openHiTLS', description='parser configure arguments')
    try:
        # Version/Release Build Configuration Parameters
        # (typo fix: 'moudules.cmake' -> 'modules.cmake' in help text)
        parser.add_argument('-m', '--module_cmake', action='store_true', help='generate modules.cmake file')
        parser.add_argument('--build_dir', metavar='dir', type=str, default=os.path.join(srcdir, 'build'),
                            help='compile temp directory')
        parser.add_argument('--output_dir', metavar='dir', type=str, default=os.path.join(srcdir, 'output'),
                            help='compile output directory')
        parser.add_argument('--hkey', metavar='hkey', type=str, default="b8fc4931453af3285f0f",
                            help='Key used by the HMAC.')
        # Configuration file
        parser.add_argument('--feature_config', metavar='file_path', type=str, default='',
                            help='Configuration file of the compilation features.')
        parser.add_argument('--compile_config', metavar='file_path', type=str, default='',
                            help='Configuration file of compilation parameters.')
        # Compilation Feature Configuration
        parser.add_argument('--enable', metavar='feature', nargs='+', default=[],
                            help='enable some libs or features, such as --enable sha256 aes gcm_asm, default is "all"')
        parser.add_argument('--disable', metavar='feature', nargs='+', default=['uio_sctp'],
                            help='disable some libs or features, such as --disable aes gcm_asm,\
                            default is disable "uio_sctp" ')
        parser.add_argument('--enable-sctp', action="store_true", help='enable sctp which is used in DTLS')
        parser.add_argument('--asm_type', type=str, help='Assembly Type, default is "no_asm".')
        parser.add_argument('--asm', metavar='feature', default=[], nargs='+', help='config asm, such as --asm sha2')
        # System Configuration
        parser.add_argument('--system', type=str,
                            help='To enable feature "sal_xxx", should specify the system.')
        parser.add_argument('--endian', metavar='little|big', type=str, choices=['little', 'big'],
                            help='Specify the platform endianness as little or big, default is "little".')
        parser.add_argument('--bits', metavar='32|64', type=int, choices=[32, 64],
                            help='To enable feature "bn", should specify the number of OS bits, default is "64".')
        # Compiler Options, Link Options
        # (typo fix: 'staic' -> 'static' in the two help strings below)
        parser.add_argument('--lib_type', choices=['static', 'shared', 'object'], nargs='+',
                            help='set lib type, such as --lib_type static shared, default is "static shared object"')
        parser.add_argument('--add_options', default='', type=str,
                            help='add some compile options, such as --add_options="-O0 -g"')
        parser.add_argument('--del_options', default='', type=str,
                            help='delete some compile options such as --del_options="-O2 -Werror"')
        parser.add_argument('--add_link_flags', default='', type=str,
                            help='add some link flags such as --add_link_flags="-pie"')
        parser.add_argument('--del_link_flags', default='', type=str,
                            help='delete some link flags such as --del_link_flags="-shared -Wl,-z,relro"')
        parser.add_argument('--no_config_check', action='store_true', help='disable the configuration check')
        parser.add_argument('--hitls_version', default='openHiTLS 0.2.0 15 May 2025', help='%(prog)s version str')
        parser.add_argument('--hitls_version_num', default=0x00200000, help='%(prog)s version num')
        parser.add_argument('--bundle_libs', action='store_true', help='Indicates that multiple libraries are bundled together. By default, it is not bound.\
                            It needs to be used together with "-m"')
        # Compile the command apps.
        parser.add_argument('--executes', dest='executes', default=[], nargs='*', help='Enable hitls command apps')
        args = vars(parser.parse_args())
        # Derived paths: where the merged JSON configs live inside build_dir.
        args['tmp_feature_config'] = os.path.join(args['build_dir'], 'feature_config.json')
        args['tmp_compile_config'] = os.path.join(args['build_dir'], 'compile_config.json')
        # disable uio_sctp by default; re-enable it for --enable-sctp or -m
        if args['enable_sctp'] or args['module_cmake']:
            if 'uio_sctp' in args['disable']:
                args['disable'].remove('uio_sctp')
    # NOTE(review): argparse normally raises SystemExit on bad user input;
    # ArgumentError is only raised for programmatic registration errors — confirm intent.
    except argparse.ArgumentError as e:
        parser.print_help()
        raise ValueError("Error: Failed to obtain parameters.") from e
    return argparse.Namespace(**args)
class Configure:
    """Provides operations related to configuration and input parameter parsing:
    1 Parse input parameters.
    2 Read configuration files and input parameters.
    3 Update the final configuration files in the build directory.
    """
    # Paths (relative to the source root) of the shipped JSON config files.
    config_json_file = 'config.json'
    feature_json_file = 'config/json/feature.json'
    complete_options_json_file = 'config/json/complete_options.json'
    default_compile_json_file = 'config/json/compile.json'

    def __init__(self, features: FeatureParser):
        # Parse the command line immediately and validate it against the
        # feature catalogue.
        self._features = features
        self._args = get_cfg_args()
        self._preprocess_args()

    @property
    def args(self):
        # Parsed command-line arguments (argparse.Namespace).
        return self._args

    def _preprocess_args(self):
        # Validate the raw command-line arguments before they are used.
        # Raises FileNotFoundError or ValueError on invalid combinations.
        if self._args.feature_config and not os.path.exists(self._args.feature_config):
            raise FileNotFoundError('File not found: %s' % self._args.feature_config)
        if self._args.compile_config and not os.path.exists(self._args.compile_config):
            raise FileNotFoundError('File not found: %s' % self._args.compile_config)
        if 'all' in self._args.enable:
            # 'all' is exclusive: it cannot be combined with named features.
            if len(self._args.enable) > 1:
                raise ValueError("Error: 'all' and other features cannot be set at the same time.")
        else:
            # Every named feature must be a known lib or feature.
            for fea in self._args.enable:
                if fea in self._features.libs or fea in self._features.feas_info:
                    continue
                raise ValueError("unrecognized fea '%s'" % fea)
        if self._args.asm_type:
            if self._args.asm_type not in self._features.asm_types:
                raise ValueError("Unsupported asm_type: asm_type should be one of [%s]" % self._features.asm_types)
        else:
            # --asm without --asm_type is meaningless.
            if self._args.asm and not self._args.asm_type:
                raise ValueError("Error: 'asm_type' and 'asm' must be set at the same time.")
        # The value of 'asm' will be verified later.

    @staticmethod
    def _load_config(is_fea_cfg, src_file, dest_file):
        # Materialize one config file in the build directory: refuse to
        # overwrite an existing one with a user-supplied file, otherwise
        # copy the user file or fall back to the built-in defaults.
        if os.path.exists(dest_file):
            if src_file != '':
                raise FileExistsError('{} already exists'.format(dest_file))
        else:
            if src_file == '':
                # No custom configuration file is specified, create a default config file.
                cfg = FeatureConfigParser.default_cfg() if is_fea_cfg else CompileConfigParser.default_cfg()
                save_json_file(cfg, dest_file)
            else:
                copy_file(src_file, dest_file)

    def load_config_to_build(self):
        """Load the compilation feature and compilation option configuration files to the build directory:
        build/feature_config.json
        build/compile_config.json
        """
        if not os.path.exists(self._args.build_dir):
            os.makedirs(self._args.build_dir)
        self._load_config(True, self._args.feature_config, self._args.tmp_feature_config)
        self._load_config(False, self._args.compile_config, self._args.tmp_compile_config)

    def update_feature_config(self, gen_cmake):
        """Update the feature configuration file in the build based on the input parameters."""
        conf_custom_feature = FeatureConfigParser(self._features, self._args.tmp_feature_config)
        if self._args.executes:
            conf_custom_feature.enable_executes(self._args.executes)
        # If no feature is enabled before modules.cmake is generated, set enable to "all".
        if not conf_custom_feature.libs and not self._args.enable and gen_cmake:
            self._args.enable = ['all']
        # Set parameters by referring to "FeatureConfigParser.key_value".
        conf_custom_feature.set_param('libType', self._args.lib_type)
        if self._args.bundle_libs:
            conf_custom_feature.set_param('bundleLibs', self._args.bundle_libs)
        conf_custom_feature.set_param('endian', self._args.endian)
        conf_custom_feature.set_param('system', self._args.system, False)
        conf_custom_feature.set_param('bits', self._args.bits, False)
        enable_feas, asm_feas = conf_custom_feature.get_enable_feas(self._args.enable, self._args.asm)
        # Command-line --asm_type wins; otherwise keep the type already in
        # the config file (if it is not the 'no_asm' placeholder).
        asm_type = self._args.asm_type if self._args.asm_type else ''
        if not asm_type and conf_custom_feature.asm_type != 'no_asm':
            asm_type = conf_custom_feature.asm_type
        if asm_type:
            conf_custom_feature.set_asm_type(asm_type)
            conf_custom_feature.set_asm_features(enable_feas, asm_feas, asm_type)
        if enable_feas:
            conf_custom_feature.set_c_features(enable_feas)
        self._args.securec_lib = conf_custom_feature.securec_lib
        # update feature and resave file.
        conf_custom_feature.update_feature(self._args.enable, self._args.disable, gen_cmake)
        conf_custom_feature.save(self._args.tmp_feature_config)
        self._args.bundle_libs = conf_custom_feature.bundle_libs

    def update_compile_config(self, all_options: CompleteOptionParser):
        """Update the compilation configuration file in the build based on the input parameters."""
        conf_custom_compile = CompileConfigParser(all_options, self._args.tmp_compile_config)
        if self._args.add_options:
            conf_custom_compile.change_options(self._args.add_options.strip().split(' '), True)
        if self._args.del_options:
            conf_custom_compile.change_options(self._args.del_options.strip().split(' '), False)
        if self._args.add_link_flags:
            conf_custom_compile.change_link_flags(self._args.add_link_flags.strip().split(' '), True)
        if self._args.del_link_flags:
            conf_custom_compile.change_link_flags(self._args.del_link_flags.strip().split(' '), False)
        conf_custom_compile.save(self._args.tmp_compile_config)
class CMakeGenerator:
""" Generating CMake Commands and Scripts Based on Configuration Files """
    def __init__(self, args, features: FeatureParser, all_options: CompleteOptionParser):
        # args: the argparse.Namespace produced by get_cfg_args().
        self._args = args
        self._cfg_feature = features
        # Default compile options shipped with the source tree.
        self._cfg_compile = CompileParser(all_options, Configure.default_compile_json_file)
        # User feature selection merged into build/feature_config.json.
        self._cfg_custom_feature = FeatureConfigParser(features, args.tmp_feature_config)
        self._cfg_custom_feature.check_fea_opts()
        self._cfg_custom_compile = CompileConfigParser(all_options, args.tmp_compile_config)
        self._asm_type = self._cfg_custom_feature.asm_type
        # NOTE(review): platform is hard-coded; presumably overridden elsewhere — confirm.
        self._platform = 'linux'
        # When True, an HMAC integrity file is generated for each shared lib.
        self._approved_provider = False
        self._hmac = "sha256"
@staticmethod
def _add_if_exists(inc_dirs, path):
if os.path.exists(path):
inc_dirs.add(path)
@staticmethod
def _get_common_include(modules: list):
""" modules: ['::','::']"""
inc_dirs = set()
top_modules = set(x.split('::')[0] for x in modules)
top_modules.add('bsl/log')
top_modules.add('bsl/err')
for module in top_modules:
CMakeGenerator._add_if_exists(inc_dirs, module + '/include')
CMakeGenerator._add_if_exists(inc_dirs, 'include/' + module)
CMakeGenerator._add_if_exists(inc_dirs, 'config/macro_config')
CMakeGenerator._add_if_exists(inc_dirs, '../../../../Secure_C/include')
CMakeGenerator._add_if_exists(inc_dirs, '../../../platform/Secure_C/include')
return inc_dirs
def _get_module_include(self, mod: str, dep_mods: list):
inc_dirs = set()
dep_mods.append(mod)
for dep in dep_mods:
top_dir, sub_dir = dep.split('::')
path = "{}/{}/include".format(top_dir, sub_dir)
if os.path.exists(path):
inc_dirs.add(path)
top_mod, sub_mod = dep.split('::')
cfg_inc = self._cfg_feature.modules[top_mod][sub_mod].get('.include', [])
for inc_dir in cfg_inc:
if os.path.exists(inc_dir):
inc_dirs.add(inc_dir)
return inc_dirs
@staticmethod
def _expand_srcs(srcs):
if not srcs:
return []
ret = []
for x in srcs:
ret += glob.glob(x, recursive=True)
if len(ret) == 0:
raise SystemError("The .c file does not exist in the {} directory.".format(srcs))
ret.sort()
return ret
@classmethod
def _gen_cmd_cmake(cls, cmd: str, title, content_obj=None):
if not content_obj:
return '{}({})\n'.format(cmd, title)
items = None
if isinstance(content_obj, list) or isinstance(content_obj, set):
items = content_obj
elif isinstance(content_obj, dict):
items = content_obj.values()
elif isinstance(content_obj, str):
items = [content_obj]
else:
raise ValueError('Unsupported type "%s"' % type(content_obj))
content = ''
for item in items:
content += ' {}\n'.format(item)
if len(items) == 1:
return '{}({} {})\n'.format(cmd, title, item)
else:
return '{}({}\n{})\n'.format(cmd, title, content)
def _get_module_src_set(self, lib, top_mod, sub_mod, mod_obj):
srcs = self._cfg_feature.get_mod_srcs(top_mod, sub_mod, mod_obj)
return self._expand_srcs(srcs)
def _gen_module_cmake(self, lib, mod, mod_obj, mods_cmake):
top_mod, module_name = mod.split('::')
inc_set = self._get_module_include(mod, mod_obj.get('deps', []))
src_list = self._get_module_src_set(lib, top_mod, module_name, mod_obj)
tgt_name = module_name + '-objs'
cmake = '\n# Add module {} \n'.format(module_name)
cmake += self._gen_cmd_cmake('add_library', '{} OBJECT'.format(tgt_name))
cmake += self._gen_cmd_cmake('target_include_directories', '{} PRIVATE'.format(tgt_name), inc_set)
cmake += self._gen_cmd_cmake('target_sources', '{} PRIVATE'.format(tgt_name), src_list)
mods_cmake[tgt_name] = cmake
    def _gen_shared_lib_cmake(self, lib_name, tgt_obj_list, tgt_list, macros):
        # Emit the CMake snippet building the shared-library variant of
        # 'lib_name' from the given object targets, append the new target
        # name to tgt_list, and return the snippet text. 'macros' is the
        # compile-macro list, consulted for hitls_bsl link dependencies.
        tgt_name = lib_name + '-shared'
        properties = 'OUTPUT_NAME {}'.format(lib_name)
        cmake = '\n'
        cmake += self._gen_cmd_cmake('add_library', '{} SHARED'.format(tgt_name), tgt_obj_list)
        cmake += self._gen_cmd_cmake('target_link_options', '{} PRIVATE'.format(tgt_name), '${SHARED_LNK_FLAGS}')
        # Link against a pre-built Secure C library when it is bundled in-tree.
        if os.path.exists('{}/platform/Secure_C/lib'.format(srcdir)):
            cmake += self._gen_cmd_cmake('target_link_directories', '{} PRIVATE'.format(tgt_name), '{}/platform/Secure_C/lib'.format(srcdir))
        cmake += self._gen_cmd_cmake('set_target_properties', '{} PROPERTIES'.format(tgt_name), properties)
        cmake += 'install(TARGETS %s DESTINATION ${CMAKE_INSTALL_PREFIX}/lib)\n' % tgt_name
        if (self._approved_provider):
            # Use the openssl command to generate an HMAC file.
            cmake += 'install(CODE "execute_process(COMMAND openssl dgst -hmac \\\"%s\\\" -%s -out lib%s.so.hmac lib%s.so)")\n' % (self._args.hkey, self._hmac, lib_name, lib_name)
            # Install the hmac file to the output directory.
            cmake += 'install(CODE "execute_process(COMMAND cp lib%s.so.hmac ${CMAKE_INSTALL_PREFIX}/lib/lib%s.so.hmac)")\n' % (lib_name, lib_name)
        # Per-library link dependencies. hitls_bsl needs the Secure C lib
        # when UIO is enabled and additionally libdl when dynamic loading
        # (SAL_DL) is enabled.
        if lib_name == 'hitls_bsl':
            for item in macros:
                if item == '-DHITLS_BSL_UIO' or item == '-DHITLS_BSL_UIO_SCTP':
                    cmake += self._gen_cmd_cmake("target_link_directories", "hitls_bsl-shared PRIVATE " + "${CMAKE_SOURCE_DIR}/platform/Secure_C/lib")
                    cmake += self._gen_cmd_cmake("target_link_libraries", "hitls_bsl-shared " + str(self._args.securec_lib))
                if item == '-DHITLS_BSL_SAL_DL':
                    cmake += self._gen_cmd_cmake("target_link_directories", "hitls_bsl-shared PRIVATE " + "${CMAKE_SOURCE_DIR}/platform/Secure_C/lib")
                    cmake += self._gen_cmd_cmake("target_link_libraries", "hitls_bsl-shared dl " + str(self._args.securec_lib))
        # The remaining openHiTLS libraries link against the layers below them.
        if lib_name == 'hitls_crypto':
            cmake += self._gen_cmd_cmake("target_link_directories", "hitls_crypto-shared PRIVATE " + "${CMAKE_SOURCE_DIR}/platform/Secure_C/lib")
            cmake += self._gen_cmd_cmake("target_link_libraries", "hitls_crypto-shared hitls_bsl-shared " + str(self._args.securec_lib))
        if lib_name == 'hitls_tls':
            cmake += self._gen_cmd_cmake("target_link_directories", "hitls_tls-shared PRIVATE " + "${CMAKE_SOURCE_DIR}/platform/Secure_C/lib")
            cmake += self._gen_cmd_cmake("target_link_libraries", "hitls_tls-shared hitls_pki-shared hitls_crypto-shared hitls_bsl-shared " + str(self._args.securec_lib))
        if lib_name == 'hitls_pki':
            cmake += self._gen_cmd_cmake("target_link_directories", "hitls_pki-shared PRIVATE " + "${CMAKE_SOURCE_DIR}/platform/Secure_C/lib")
            cmake += self._gen_cmd_cmake(
                "target_link_libraries", "hitls_pki-shared hitls_crypto-shared hitls_bsl-shared " + str(self._args.securec_lib))
        if lib_name == 'hitls_auth':
            cmake += self._gen_cmd_cmake("target_link_directories", "hitls_auth-shared PRIVATE " + "${CMAKE_SOURCE_DIR}/platform/Secure_C/lib")
            cmake += self._gen_cmd_cmake(
                "target_link_libraries", "hitls_auth-shared hitls_crypto-shared hitls_bsl-shared " + str(self._args.securec_lib))
        if self._approved_provider:
            # NOTE(review): links a 'hitls-shared' target defined elsewhere — confirm.
            cmake += self._gen_cmd_cmake("target_link_libraries", "hitls-shared m " + str(self._args.securec_lib))
        tgt_list.append(tgt_name)
        return cmake
def _gen_static_lib_cmake(self, lib_name, tgt_obj_list, tgt_list):
tgt_name = lib_name + '-static'
properties = 'OUTPUT_NAME {}'.format(lib_name)
cmake = '\n'
cmake += self._gen_cmd_cmake('add_library', '{} STATIC'.format(tgt_name), tgt_obj_list)
cmake += self._gen_cmd_cmake('set_target_properties', '{} PROPERTIES'.format(tgt_name), properties)
cmake += 'install(TARGETS %s DESTINATION ${CMAKE_INSTALL_PREFIX}/lib)\n' % tgt_name
tgt_list.append(tgt_name)
return cmake
def _gen_obejct_lib_cmake(self, lib_name, tgt_obj_list, tgt_list):
tgt_name = lib_name + '-object'
properties = 'OUTPUT_NAME lib{}.o'.format(lib_name)
cmake = '\n'
cmake += self._gen_cmd_cmake('add_executable', tgt_name, tgt_obj_list)
cmake += self._gen_cmd_cmake('target_link_options', '{} PRIVATE'.format(tgt_name), '${SHARED_LNK_FLAGS}')
cmake += self._gen_cmd_cmake('set_target_properties', '{} PROPERTIES'.format(tgt_name), properties)
cmake += 'install(TARGETS %s DESTINATION ${CMAKE_INSTALL_PREFIX}/obj)\n' % tgt_name
tgt_list.append(tgt_name)
return cmake
def _get_definitions(self):
ret = '"${CMAKE_C_FLAGS} -DOPENHITLS_VERSION_S=\'\\"%s\\"\' -DOPENHITLS_VERSION_I=%lu %s' % (
self._args.hitls_version, self._args.hitls_version_num, '-D__FILENAME__=\'\\"$(notdir $(subst .o,,$@))\\"\'')
if self._approved_provider:
icv_key = '-DCMVP_INTEGRITYKEY=\'\\"%s\\"\'' % self._args.hkey
ret += ' %s' % icv_key
ret += '"'
return ret
def _gen_lib_cmake(self, lib_name, inc_dirs, lib_obj, macros):
    """Populate lib_obj['cmake'] and lib_obj['targets'] for one library project.

    Emits the project header, compiler-flag setup, include directories and
    every module's CMake fragment, then appends shared/static/object
    library targets according to the configured lib_type.
    """
    language = self._cfg_feature.libs[lib_name].get('lang', 'C')
    fragments = ['project({} {})\n\n'.format(lib_name, language)]
    fragments.append(self._gen_cmd_cmake('set', 'CMAKE_ASM_NASM_OBJECT_FORMAT elf64'))
    fragments.append(self._gen_cmd_cmake('set', 'CMAKE_C_FLAGS', '${CC_ALL_OPTIONS}'))
    fragments.append(self._gen_cmd_cmake('set', 'CMAKE_ASM_FLAGS', '${CC_ALL_OPTIONS}'))
    fragments.append(self._gen_cmd_cmake('set', 'CMAKE_C_FLAGS', self._get_definitions()))
    fragments.append(self._gen_cmd_cmake('include_directories', '', inc_dirs))
    mods = lib_obj['mods_cmake']
    # Module fragments must precede the library targets that consume them.
    fragments.extend(mods.values())
    cmake = ''.join(fragments)
    obj_targets = ['$<TARGET_OBJECTS:{}>'.format(mod) for mod in mods.keys()]
    targets = []
    lib_type = self._cfg_custom_feature.lib_type
    if 'shared' in lib_type:
        cmake += self._gen_shared_lib_cmake(lib_name, obj_targets, targets, macros)
    if 'static' in lib_type:
        cmake += self._gen_static_lib_cmake(lib_name, obj_targets, targets)
    if 'object' in lib_type:
        cmake += self._gen_obejct_lib_cmake(lib_name, obj_targets, targets)
    lib_obj['cmake'] = cmake
    lib_obj['targets'] = targets
def _gen_exe_cmake(self, exe_name, inc_dirs, exe_obj):
    """Generate the per-executable CMake project text.

    Fills exe_obj['cmake'] with the full project definition and
    exe_obj['targets'] with the single executable target name.
    """
    lang = self._cfg_feature.executes[exe_name].get('lang', 'C')
    definitions = '"${CMAKE_C_FLAGS} -DHITLS_VERSION=\'\\"%s\\"\' %s"' % (
        self._args.hitls_version, '-D__FILENAME__=\'\\"$(notdir $(subst .o,,$@))\\"\'')
    cmake = 'project({} {})\n\n'.format(exe_name, lang)
    cmake += self._gen_cmd_cmake('set', 'CMAKE_C_FLAGS', '${CC_ALL_OPTIONS}')
    cmake += self._gen_cmd_cmake('set', 'CMAKE_C_FLAGS', definitions)
    cmake += self._gen_cmd_cmake('include_directories', '', inc_dirs)
    # Each module's object-library rules must be emitted before the executable.
    for _, mod_cmake in exe_obj['mods_cmake'].items():
        cmake += mod_cmake
    tgt_obj_list = list('$<TARGET_OBJECTS:{}>'.format(x) for x in exe_obj['mods_cmake'].keys())
    cmake += self._gen_cmd_cmake('add_executable', exe_name, tgt_obj_list)
    lib_type = self._cfg_custom_feature.lib_type
    # Ensure the hitls libraries are built before this executable links.
    if 'shared' in lib_type:
        cmake += self._gen_cmd_cmake('add_dependencies', exe_name,
            'hitls_pki-shared hitls_crypto-shared hitls_bsl-shared')
    elif 'static' in lib_type:
        cmake += self._gen_cmd_cmake('add_dependencies', exe_name,
            'hitls_pki-static hitls_crypto-static hitls_bsl-static')
    common_link_dir = [
        '${CMAKE_CURRENT_LIST_DIR}', # libhitls_*
        '${CMAKE_SOURCE_DIR}/platform/Secure_C/lib',
    ]
    common_link_lib = [
        'hitls_pki', 'hitls_crypto', 'hitls_bsl',
        'dl', 'pthread', 'm',
        str(self._args.securec_lib)
    ]
    cmake += self._gen_cmd_cmake('list', 'APPEND HITLS_APP_LINK_DIRS', common_link_dir)
    cmake += self._gen_cmd_cmake('list', 'APPEND HITLS_APP_LINK_LIBS', common_link_lib)
    cmake += self._gen_cmd_cmake('target_link_directories', '%s PRIVATE' % exe_name, '${HITLS_APP_LINK_DIRS}')
    cmake += self._gen_cmd_cmake('target_link_libraries', exe_name, '${HITLS_APP_LINK_LIBS}')
    cmake += self._gen_cmd_cmake('target_link_options', '{} PRIVATE'.format(exe_name), '${EXE_LNK_FLAGS}')
    cmake += 'install(TARGETS %s DESTINATION ${CMAKE_INSTALL_PREFIX})\n' % exe_name
    exe_obj['cmake'] = cmake
    exe_obj['targets'] = [exe_name]
def _gen_bundled_lib_cmake(self, lib_name, inc_dirs, projects, macros):
lang = 'C ASM'
if 'mpa' in projects.keys():
lang += 'ASM_NASM'
cmake = 'project({} {})\n\n'.format(lib_name, lang)
cmake += self._gen_cmd_cmake('set', 'CMAKE_ASM_NASM_OBJECT_FORMAT elf64')
cmake += self._gen_cmd_cmake('set', 'CMAKE_C_FLAGS', '${CC_ALL_OPTIONS}')
cmake += self._gen_cmd_cmake('set', 'CMAKE_ASM_FLAGS', '${CC_ALL_OPTIONS}')
cmake += self._gen_cmd_cmake('set', 'CMAKE_C_FLAGS', self._get_definitions())
cmake += self._gen_cmd_cmake('include_directories', '', inc_dirs)
tgt_obj_list = []
for _, lib_obj in projects.items():
tgt_obj_list.extend(list('$<TARGET_OBJECTS:{}>'.format(x) for x in lib_obj['mods_cmake'].keys()))
for _, mod_cmake in lib_obj['mods_cmake'].items():
cmake += mod_cmake
tgt_list = []
lib_type = self._cfg_custom_feature.lib_type
if 'shared' in lib_type:
cmake += self._gen_shared_lib_cmake(lib_name, tgt_obj_list, tgt_list, macros)
if 'static' in lib_type:
cmake += self._gen_static_lib_cmake(lib_name, tgt_obj_list, tgt_list)
if 'object' in lib_type:
cmake += self._gen_obejct_lib_cmake(lib_name, tgt_obj_list, tgt_list)
return {lib_name: {'cmake': cmake, 'targets': tgt_list}}
def _gen_common_compile_c_flags(self):
return self._gen_cmd_cmake('set', 'CMAKE_C_FLAGS', self._get_definitions())
def _gen_projects_cmake(self, macros):
    """Generate CMake text for every enabled library and executable.

    Returns a dict mapping project name -> {'cmake': str, 'targets': [...]}.
    When --bundle-libs is set, all library projects are replaced by a
    single bundled 'hitls' entry; executables are always added afterwards.
    """
    lib_enable_modules, exe_enable_modules = self._cfg_custom_feature.get_enable_modules()
    projects = {}
    all_inc_dirs = set()
    for lib, lib_obj in lib_enable_modules.items():
        projects[lib] = {}
        projects[lib]['mods_cmake'] = {}
        inc_dirs = self._get_common_include(lib_obj.keys())
        for mod, mod_obj in lib_obj.items():
            self._gen_module_cmake(lib, mod, mod_obj, projects[lib]['mods_cmake'])
        if self._args.bundle_libs:
            # Defer per-library generation: just accumulate include dirs,
            # the bundled project is emitted once after this loop.
            all_inc_dirs = all_inc_dirs.union(inc_dirs)
            continue
        self._gen_lib_cmake(lib, inc_dirs, projects[lib], macros)
    if self._args.bundle_libs:
        # update projects
        projects = self._gen_bundled_lib_cmake('hitls', all_inc_dirs, projects, macros)
    for exe, exe_obj in exe_enable_modules.items():
        projects[exe] = {}
        projects[exe]['mods_cmake'] = {}
        inc_dirs = self._get_common_include(exe_obj.keys())
        for mod, mod_obj in exe_obj.items():
            self._gen_module_cmake(exe, mod, mod_obj, projects[exe]['mods_cmake'])
        self._gen_exe_cmake(exe, inc_dirs, projects[exe])
    return projects
def _gen_target_cmake(self, lib_tgts):
cmake = 'add_custom_target(openHiTLS)\n'
cmake += self._gen_cmd_cmake('add_dependencies', 'openHiTLS', lib_tgts)
return cmake
def _gen_set_param_cmake(self, macro_file):
compile_flags, link_flags = self._cfg_compile.union_options(self._cfg_custom_compile)
macros = self._cfg_custom_feature.get_fea_macros()
macros.sort()
if self._args.no_config_check:
macros.append('-DHITLS_NO_CONFIG_CHECK')
if '-DHITLS_CRYPTO_CMVP_ISO19790' in compile_flags:
self._approved_provider = True
self._hmac = "sha256"
elif '-DHITLS_CRYPTO_CMVP_SM' in compile_flags:
self._approved_provider = True
self._hmac = "sm3"
compile_flags.extend(macros)
hitls_macros = list(filter(lambda x: '-DHITLS' in x, compile_flags))
with open(macro_file, "w") as f:
f.write(" ".join(hitls_macros))
f.close()
self._cc_all_options = compile_flags
compile_flags_str = '"{}"'.format(" ".join(compile_flags))
shared_link_flags = '{}'.format(" ".join(link_flags['SHARED']) + " " + " ".join(link_flags['PUBLIC']))
exe_link_flags = '{}'.format(" ".join(link_flags['EXE']) + " " + " ".join(link_flags['PUBLIC']))
cmake = self._gen_cmd_cmake('set', 'CC_ALL_OPTIONS', compile_flags_str) + "\n"
cmake += self._gen_cmd_cmake('set', 'SHARED_LNK_FLAGS', shared_link_flags) + "\n"
cmake += self._gen_cmd_cmake('set', 'EXE_LNK_FLAGS', exe_link_flags) + "\n"
return cmake, macros
def out_cmake(self, cmake_path, macro_file):
    """Write the complete generated CMake script to ``cmake_path``.

    Also writes the active -DHITLS* macro list to ``macro_file`` (done
    inside _gen_set_param_cmake).
    """
    self._cfg_custom_feature.check_bn_config()
    set_param_cmake, macros = self._gen_set_param_cmake(macro_file)
    set_param_cmake += self._gen_common_compile_c_flags()
    projects = self._gen_projects_cmake(macros)
    # Flatten every project's target list for the aggregate openHiTLS target.
    lib_tgts = list(tgt for lib_obj in projects.values() for tgt in lib_obj['targets'])
    bottom_cmake = self._gen_target_cmake(lib_tgts)
    with open(cmake_path, "w") as f:
        f.write(set_param_cmake)
        for lib_obj in projects.values():
            f.write(lib_obj['cmake'])
            f.write('\n\n')
        f.write(bottom_cmake)
def main():
    """Entry point: load feature/compile configuration and emit modules.cmake.

    Relies on module-level globals (srcdir, Configure, FeatureParser,
    CompleteOptionParser, CMakeGenerator) defined earlier in this file.
    """
    os.chdir(srcdir)
    # The Python version cannot be earlier than 3.5.
    if sys.version_info < (3, 5):
        print("your python version %d.%d should not be lower than 3.5" % tuple(sys.version_info[:2]))
        raise Exception("your python version %d.%d should not be lower than 3.5" % tuple(sys.version_info[:2]))
    conf_feature = FeatureParser(Configure.feature_json_file)
    complete_options = CompleteOptionParser(Configure.complete_options_json_file)
    cfg = Configure(conf_feature)
    cfg.load_config_to_build()
    cfg.update_feature_config(cfg.args.module_cmake)
    cfg.update_compile_config(complete_options)
    if cfg.args.module_cmake:
        tmp_cmake = os.path.join(cfg.args.build_dir, 'modules.cmake')
        macro_file = os.path.join(cfg.args.build_dir, 'macro.txt')
        # Start from a clean macro file; it is rewritten by out_cmake().
        if (os.path.exists(macro_file)):
            os.remove(macro_file)
        CMakeGenerator(cfg.args, conf_feature, complete_options).out_cmake(tmp_cmake, macro_file)
if __name__ == '__main__':
    try:
        main()
    except SystemExit:
        # NOTE(review): the original exit status is discarded and replaced
        # with 0 here — confirm this is intended for failed argument parsing.
        exit(0)
    except:
        # Bare except: catches every remaining exception (including
        # KeyboardInterrupt), prints the traceback and exits with code 2.
        traceback.print_exc()
        exit(2)
| 2302_82127028/openHiTLS-examples_1508 | configure.py | Python | unknown | 33,691 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CRYPT_AES_H
#define CRYPT_AES_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
#define CRYPT_AES_128 128
#define CRYPT_AES_192 192
#define CRYPT_AES_256 256
#define CRYPT_AES_MAX_ROUNDS 14
#define CRYPT_AES_MAX_KEYLEN (4 * (CRYPT_AES_MAX_ROUNDS + 1))
/**
* @ingroup CRYPT_AES_Key
*
* aes key structure
*/
typedef struct {
    uint32_t key[CRYPT_AES_MAX_KEYLEN]; /* expanded round-key schedule, 4 words per round key */
    uint32_t rounds;                    /* number of rounds: 10, 12 or 14 for AES-128/192/256 */
} CRYPT_AES_Key;
/**
* @ingroup aes
* @brief Set the AES encryption key.
*
* @param ctx [IN] AES handle
* @param key [IN] Encryption key
* @param len [IN] Key length. The value must be 16 bytes.
*/
int32_t CRYPT_AES_SetEncryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len);
/**
* @ingroup aes
* @brief Set the AES encryption key.
*
* @param ctx [IN] AES handle
* @param key [IN] Encryption key
* @param len [IN] Key length. The value must be 24 bytes.
*/
int32_t CRYPT_AES_SetEncryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len);
/**
* @ingroup aes
* @brief Set the AES encryption key.
*
* @param ctx [IN] AES handle
* @param key [IN] Encryption key
* @param len [IN] Key length. The value must be 32 bytes.
*/
int32_t CRYPT_AES_SetEncryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len);
/**
* @ingroup aes
* @brief Set the AES decryption key.
*
* @param ctx [IN] AES handle
* @param key [IN] Decryption key
* @param len [IN] Key length. The value must be 16 bytes.
*/
int32_t CRYPT_AES_SetDecryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len);
/**
* @ingroup aes
* @brief Set the AES decryption key.
*
* @param ctx [IN] AES handle
* @param key [IN] Decryption key
* @param len [IN] Key length. The value must be 24 bytes.
*/
int32_t CRYPT_AES_SetDecryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len);
/**
* @ingroup aes
* @brief Set the AES decryption key.
*
* @param ctx [IN] AES handle
* @param key [IN] Decryption key
* @param len [IN] Key length. The value must be 32 bytes.
*/
int32_t CRYPT_AES_SetDecryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len);
/**
* @ingroup aes
* @brief AES encryption
*
* @param ctx [IN] AES handle, storing keys
* @param in [IN] Input plaintext data. The value must be 16 bytes.
* @param out [OUT] Output ciphertext data. The length is 16 bytes.
* @param len [IN] Block length.
*/
int32_t CRYPT_AES_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
/**
* @ingroup aes
* @brief AES decryption
*
* @param ctx [IN] AES handle, storing keys
* @param in [IN] Input ciphertext data. The value must be 16 bytes.
* @param out [OUT] Output plaintext data. The length is 16 bytes.
* @param len [IN] Block length. The length is 16.
*/
int32_t CRYPT_AES_Decrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
#ifdef HITLS_CRYPTO_CBC
/**
* @ingroup aes
* @brief AES cbc encryption
*
* @param ctx [IN] AES handle, storing keys
* @param in [IN] Input plaintext data, 16 bytes.
* @param out [OUT] Output ciphertext data. The length is 16 bytes.
* @param len [IN] Block length.
* @param iv [IN] Initialization vector.
*/
int32_t CRYPT_AES_CBC_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len, uint8_t *iv);
/**
* @ingroup aes
* @brief AES cbc decryption
*
* @param ctx [IN] AES handle, storing keys
* @param in [IN] Input ciphertext data. The value is 16 bytes.
* @param out [OUT] Output plaintext data. The length is 16 bytes.
* @param len [IN] Block length.
* @param iv [IN] Initialization vector.
*/
int32_t CRYPT_AES_CBC_Decrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len, uint8_t *iv);
#endif /* HITLS_CRYPTO_CBC */
#if defined(HITLS_CRYPTO_CTR) || defined(HITLS_CRYPTO_GCM)
/**
* @ingroup aes
* @brief AES ctr encryption
*
* @param ctx [IN] AES handle, storing keys
* @param in [IN] Input plaintext data, 16 bytes.
* @param out [OUT] Output ciphertext data. The length is 16 bytes.
* @param len [IN] Block length.
* @param iv [IN] Initialization vector.
*/
int32_t CRYPT_AES_CTR_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len, uint8_t *iv);
#endif
#ifdef HITLS_CRYPTO_ECB
/**
* @ingroup aes
* @brief AES ecb encryption
*
* @param ctx [IN] AES handle, storing keys
* @param in [IN] Input plaintext data. The length is a multiple of 16 bytes.
* @param out [OUT] Output ciphertext data. The length is a multiple of 16 bytes.
* @param len [IN] Block length.
*/
int32_t CRYPT_AES_ECB_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
/**
* @ingroup aes
* @brief AES ecb decryption
*
* @param ctx [IN] AES handle, storing keys
 * @param in   [IN] Input ciphertext data. The length is a multiple of 16 bytes.
 * @param out [OUT] Output plaintext data. The length is a multiple of 16 bytes.
* @param len [IN] Block length.
*/
int32_t CRYPT_AES_ECB_Decrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
#endif
#ifdef HITLS_CRYPTO_CFB
/**
* @brief Decryption in CFB mode
*
* @param ctx [IN] Mode handle
 * @param in   [IN] Data to be decrypted (ciphertext)
 * @param out [OUT] Decrypted data (plaintext)
* @param len [IN] Data length
* @param iv [IN] Initial vector
* @return Success response: CRYPT_SUCCESS
* Returned upon failure: Other error codes.
*/
int32_t CRYPT_AES_CFB_Decrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len, uint8_t *iv);
#endif
#ifdef HITLS_CRYPTO_XTS
/**
* @ingroup aes
* @brief AES xts encryption
*
* @param ctx [IN] AES key
* @param in [IN] Input plaintext.
* @param out [OUT] Output ciphertext.
 * @param len   [IN] Input length. The length is guaranteed to be greater than block-size.
* @param tweak [IN/OUT] XTS tweak.
*/
int32_t CRYPT_AES_XTS_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in,
uint8_t *out, uint32_t len, const uint8_t *tweak);
/**
* @ingroup aes
* @brief AES xts decryption
*
* @param ctx [IN] AES handle, storing keys
* @param in [IN] Input ciphertext data. The value is 16 bytes.
* @param out [OUT] Output plaintext data. The length is 16 bytes.
* @param len [IN] Block length.
* @param t [IN/OUT] XTS tweak.
*/
int32_t CRYPT_AES_XTS_Decrypt(const CRYPT_AES_Key *ctx, const uint8_t *in,
uint8_t *out, uint32_t len, const uint8_t *t);
#endif
/**
* @ingroup aes
* @brief Delete the AES key information.
*
* @param ctx [IN] AES handle, storing keys
* @return void
*/
void CRYPT_AES_Clean(CRYPT_AES_Key *ctx);
#ifdef __cplusplus
}
#endif // __cplusplus
#endif // HITLS_CRYPTO_AES
#endif // CRYPT_AES_H
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/include/crypt_aes.h | C | unknown | 7,250 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
#include "crypt_arm.h"
#include "crypt_aes_macro_armv8.s"
.file "crypt_aes_armv8.S"
.text
.arch armv8-a+crypto
KEY .req x0
IN .req x1
OUT .req x2
ROUNDS .req w6
RDK0 .req v17
RDK1 .req v18
.section .rodata
.align 5
.g_cron:
.long 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
.align 5
/*
* In Return-oriented programming (ROP) and Jump-oriented programming (JOP), we explored features
* that Arm introduced to the Arm architecture to mitigate against JOP-style and ROP-style attacks.
* ...
* Whether the combined or NOP-compatible instructions are set depends on the architecture
* version that the code is built for. When building for Armv8.3-A, or later, the compiler will use
* the combined operations. When building for Armv8.2-A, or earlier, it will use the NOP compatible
* instructions.
*
* The paciasp and autiasp instructions are used for function pointer authentication.
* The pointer authentication feature is added in armv8.3 and is supported only by AArch64.
* The addition of pointer authentication features is described in Section A2.6.1 of
* DDI0487H_a_a-profile_architecture_reference_manual.pdf.
*/
/*
* int32_t CRYPT_AES_Encrypt(const CRYPT_AES_Key *ctx,
* const uint8_t *in,
* uint8_t *out,
* uint32_t len);
*/
.text
.globl CRYPT_AES_Encrypt
.type CRYPT_AES_Encrypt, %function
.align 5
/* Encrypt a single 16-byte block: ctx in x0 (KEY), in x1 (IN), out x2 (OUT). */
CRYPT_AES_Encrypt:
.ecb_aesenc_start:
    AARCH64_PACIASP
    stp x29, x30, [sp, #-16]!
    add x29, sp, #0
    ld1 {BLK0.16b}, [IN]                // Load one 16-byte plaintext block.
    // Full AES encryption rounds on the block; macro comes from the
    // included crypt_aes_macro_armv8.s.
    AES_ENC_1_BLK KEY BLK0.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
    st1 {BLK0.16b}, [OUT]               // Store the ciphertext block.
    eor x0, x0, x0                      // Return value 0.
    eor RDK0.16b, RDK0.16b, RDK0.16b    // Scrub round-key registers.
    eor RDK1.16b, RDK1.16b, RDK1.16b
    ldp x29, x30, [sp], #16
    AARCH64_AUTIASP
    ret
.size CRYPT_AES_Encrypt, .-CRYPT_AES_Encrypt
/*
* int32_t CRYPT_AES_Decrypt(const CRYPT_AES_Key *ctx,
* const uint8_t *in,
* uint8_t *out,
* uint32_t len);
*/
.globl CRYPT_AES_Decrypt
.type CRYPT_AES_Decrypt, %function
.align 5
/* Decrypt a single 16-byte block: ctx in x0 (KEY), in x1 (IN), out x2 (OUT). */
CRYPT_AES_Decrypt:
.ecb_aesdec_start:
    AARCH64_PACIASP
    stp x29, x30, [sp, #-16]!
    add x29, sp, #0
    ld1 {BLK0.16b}, [IN]                // Load one 16-byte ciphertext block.
    // Full AES decryption rounds on the block; macro comes from the
    // included crypt_aes_macro_armv8.s.
    AES_DEC_1_BLK KEY BLK0.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
    st1 {BLK0.16b}, [OUT]               // Store the plaintext block.
    eor x0, x0, x0                      // Return value 0.
    eor RDK0.16b, RDK0.16b, RDK0.16b    // Scrub round-key registers.
    eor RDK1.16b, RDK1.16b, RDK1.16b
    ldp x29, x30, [sp], #16
    AARCH64_AUTIASP
    ret
.size CRYPT_AES_Decrypt, .-CRYPT_AES_Decrypt
/*
* void SetEncryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key);
* Generating extended keys.
* x0 => CRYPT_AES_Key *ctx; x1 => const uint8_t *key
*/
.globl SetEncryptKey128
.type SetEncryptKey128, %function
.align 5
/*
 * Expand a 16-byte user key into the 11 round keys used for AES-128
 * encryption and record rounds = 10 in the CRYPT_AES_Key structure.
 */
SetEncryptKey128:
.Lenc_key_128:
    AARCH64_PACIASP
    stp x29, x30, [sp, #-64]!
    add x29, sp, #0
    stp x25, x26, [sp, #16]
    stp x23, x24, [sp, #32]
    stp x21, x22, [sp, #48] // Register push stack completed.
    adrp x23, .g_cron
    add x23, x23, :lo12:.g_cron // Round constant table address.
    mov x24, x0 // Copy key string address. The address increases by 16 bytes.
    ld1 {v1.16b}, [x1] // Reads the 16-byte key of a user.
    mov w26, #10 // Number of encryption rounds, which is filled
                 // with rounds in the structure.
    st1 {v1.4s}, [x0], #16 // Save the first key.
    eor v0.16b, v0.16b, v0.16b // Zeroize v0.
    mov w25, #10 // loop for 10 times.
.Lenc_key_128_loop:
    ldr w21, [x23], #4 // Obtains the round constant.
    dup v1.4s, v1.s[3] // Repeated four times,The last word of v1 is changed to v1 (128 bits).
    ld1 {v2.4s}, [x24], #16 // Obtains the 4 words used for XOR.
    ext v1.16b, v1.16b, v1.16b, #1 // Byte loop (RotWord).
    dup v3.4s, w21 // Repeat four times to change w21 to v3 (128 bits).
    aese v1.16b, v0.16b // Xor then shift then sbox (XOR operation with 0 is itself,
                        // equivalent to omitting the XOR operation).
    subs w25, w25, #1 // Count of 10-round key extension.
    eor v1.16b, v1.16b, v3.16b // Round constant XOR.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (1).
    ext v2.16b, v0.16b, v2.16b, #12 // 4321->3210.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (2).
    ext v2.16b, v0.16b, v2.16b, #12 // 3210->2100.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (3).
    ext v2.16b, v0.16b, v2.16b, #12 // 2100->1000.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (4).
    st1 {v1.4s}, [x0], #16 // Stores the newly calculated 4-bytes key data into the key string.
    b.ne .Lenc_key_128_loop // Loop jump.
    str w26, [x0, #64] // Fill in the number of rounds.
    eor x24, x24, x24 // Clear sensitivity.
    eor x0, x0, x0
    ldp x21, x22, [sp, #48]
    ldp x23, x24, [sp, #32]
    ldp x25, x26, [sp, #16]
    ldp x29, x30, [sp], #64 // Pop stack completed.
    AARCH64_AUTIASP
    ret
.size SetEncryptKey128, .-SetEncryptKey128
/*
* void SetDecryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key);
* Set a decryption key string.
* x0 => CRYPT_AES_Key *ctx; x1 => const uint8_t *key
*/
.globl SetDecryptKey128
.type SetDecryptKey128, %function
.align 5
/*
 * Build the AES-128 decryption schedule: generate the encryption schedule
 * via .Lenc_key_128, then apply InvMixColumns (aesimc, via the SETDECKEY_*
 * macros from crypt_aes_macro_armv8.s) to the interior round keys and
 * store them back in place.
 */
SetDecryptKey128:
    AARCH64_PACIASP
    stp x29, x30, [sp, #-0x40]!
    add x29, sp, #0
    stp x25, x28, [sp, #0x10] // Register push stack completed.
    stp d8, d9, [sp, #0x20]
    stp d10, d11, [sp, #0x30]
    mov x28, x0
    bl .Lenc_key_128
    ld1 {v0.4s}, [x28], #16
    SETDECKEY_LDR_9_BLOCK x28
    ld1 {v10.4s}, [x28]
    mov x25, #-16 // Post-index step for storing keys back-to-front.
    SETDECKEY_INVMIX_9_BLOCK
    st1 {v0.4s}, [x28], x25
    SETDECKEY_STR_9_BLOCK x28, x25
    st1 {v10.4s}, [x28]
    eor x28, x28, x28
    eor x0, x0, x0
    ldp d10, d11, [sp, #0x30]
    ldp d8, d9, [sp, #0x20]
    ldp x25, x28, [sp, #0x10]
    ldp x29, x30, [sp], #0x40 // Stacking completed.
    AARCH64_AUTIASP
    ret
.size SetDecryptKey128, .-SetDecryptKey128
/*
* void SetEncryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key);
* Generating extended keys.
* x0 => CRYPT_AES_Key *ctx; x1 => const uint8_t *key
*/
.globl SetEncryptKey192
.type SetEncryptKey192, %function
.align 5
/*
 * Expand a 24-byte user key into the round keys used for AES-192
 * encryption (8 expansion iterations of 6 words each) and record
 * rounds = 12 in the CRYPT_AES_Key structure.
 */
SetEncryptKey192:
.Lenc_key_192:
    AARCH64_PACIASP
    stp x29, x30, [sp, #-64]!
    add x29, sp, #0
    stp x25, x26, [sp, #16]
    stp x23, x24, [sp, #32]
    stp x21, x22, [sp, #48] // Register push stack completed.
    mov x24, x0 // Copy key string address. The address increases by 16 bytes.
    ld1 {v0.16b}, [x1], #16 // Obtain the first 128-bit key.
    mov w26, #12 // Number of encryption rounds.
    st1 {v0.4s}, [x0], #16 // Store the first 128-bit key.
    ld1 {v1.8b}, [x1] // Obtains the last 64-bit key.
    adrp x23, .g_cron
    add x23, x23, :lo12:.g_cron // Round constant table address.
    st1 {v1.2s}, [x0], #8 // Store the last 64-bit key.
    eor v0.16b, v0.16b, v0.16b // Zeroize v0.
    mov w25, #8 // loop for 8 times.
.Lenc_key_192_loop:
    dup v1.4s, v1.s[1] // Repeated four times; v1 holds only 2 valid words here,
                       // so s[1] is the newest word of the schedule.
    subs w25, w25, #1 // Count of 8-round key extensions.
    ext v1.16b, v1.16b, v1.16b, #1 // Byte cycle (RotWord).
    ldr w22, [x23], #4 // Obtains the round constant.
    aese v1.16b, v0.16b // Shift and sbox (XOR operation with 0 is itself,equivalent to omitting the XOR operation).
    dup v2.4s, w22 // Repeat 4 times. W22 becomes v2(128bit).
    eor v1.16b, v1.16b, v2.16b // Round constant XOR.
    ld1 {v2.4s}, [x24], #16 // Obtains the 4 words used for XOR
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (1).
    ext v2.16b, v0.16b, v2.16b, #12 // 4321->3210.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (2).
    ext v2.16b, v0.16b, v2.16b, #12 // 3210->2100.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (3).
    ext v2.16b, v0.16b, v2.16b, #12 // 2100->1000.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (4).
    st1 {v1.4s}, [x0], #16 // Stores the newly calculated 4-word key data into the key string.
    ld1 {v2.2s}, [x24], #8 // Loads 6 words for the last 2 words of XOR.
    dup v1.2s, v1.s[3] // Repeated two times,The last word of v1 is changed to v1 (64bit).
    eor v1.8b, v1.8b, v2.8b // 2 XOR operation (1).
    ext v2.8b, v0.8b, v2.8b, #4 // 21->10.
    eor v1.8b, v1.8b, v2.8b // 2 XOR operation (2).
    st1 {v1.2s}, [x0], #8 // Stores the newly calculated 2-word key data into the key string.
    b.ne .Lenc_key_192_loop // Loop jump.
    str w26, [x0, #24] // Fill in the number of rounds.
    eor x24, x24, x24 // Clear sensitivity.
    eor x0, x0, x0
    ldp x21, x22, [sp, #48]
    ldp x23, x24, [sp, #32]
    ldp x25, x26, [sp, #16]
    ldp x29, x30, [sp], #64 // Stacking completed.
    AARCH64_AUTIASP
    ret
.size SetEncryptKey192, .-SetEncryptKey192
/*
* void SetDecryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key);
* Set a decryption key string.
* x0 => CRYPT_AES_Key *ctx; x1 => const uint8_t *key
*/
.globl SetDecryptKey192
.type SetDecryptKey192, %function
.align 5
/*
 * Build the AES-192 decryption schedule: generate the encryption schedule
 * via .Lenc_key_192, then apply InvMixColumns (aesimc) to the interior
 * round keys (all but the first and last) and store them back in place.
 */
SetDecryptKey192:
    AARCH64_PACIASP
    stp x29, x30, [sp, #-0x50]!
    add x29, sp, #0
    stp x25, x28, [sp, #0x10] // Register is stacked.
    stp d8, d9, [sp, #0x20] // Register is stacked.
    stp d10, d11, [sp, #0x30] // Register is stacked.
    stp d12, d13, [sp, #0x40] // Register is stacked.
    mov x28, x0
    bl .Lenc_key_192
    mov x25, #-16 // Post-index step for storing keys back-to-front.
    ld1 {v0.4s}, [x28], #16
    SETDECKEY_LDR_9_BLOCK x28
    ld1 {v10.4s}, [x28], #16
    ld1 {v11.4s}, [x28], #16
    ld1 {v12.4s}, [x28]
    SETDECKEY_INVMIX_9_BLOCK
    aesimc v10.16b, v10.16b
    aesimc v11.16b, v11.16b
    st1 {v0.4s}, [x28], x25
    SETDECKEY_STR_9_BLOCK x28, x25
    st1 {v10.4s}, [x28], x25
    st1 {v11.4s}, [x28], x25
    st1 {v12.4s}, [x28]
    eor x28, x28, x28
    eor x0, x0, x0
    ldp d12, d13, [sp, #0x40]
    ldp d10, d11, [sp, #0x30]
    ldp d8, d9, [sp, #0x20]
    ldp x25, x28, [sp, #0x10]
    ldp x29, x30, [sp], #0x50 // Stacking completed.
    AARCH64_AUTIASP
    ret
.size SetDecryptKey192, .-SetDecryptKey192
/*
* void SetEncryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key);
* Generating extended keys.
* x0 => CRYPT_AES_Key *ctx; x1 => const uint8_t *key
*/
.globl SetEncryptKey256
.type SetEncryptKey256, %function
.align 5
/*
 * Expand a 32-byte user key into the 15 round keys used for AES-256
 * encryption and record rounds = 14 in the CRYPT_AES_Key structure.
 * Each loop iteration produces two round keys; the tail after the loop
 * produces the final one.
 */
SetEncryptKey256:
.Lenc_key_256:
    AARCH64_PACIASP
    stp x29, x30, [sp, #-64]!
    add x29, sp, #0
    stp x25, x26, [sp, #16]
    stp x23, x24, [sp, #32]
    stp x21, x22, [sp, #48] // Register is stacked.
    adrp x23, .g_cron
    add x23, x23, :lo12:.g_cron // Round constant table address.
    ld1 {v0.16b}, [x1], #16 // Obtain the first 128-bit key.
    mov x24, x0 // Copy key string address. The address increases by 16 bytes.
    st1 {v0.4s}, [x0], #16 // Store the first 128-bit key.
    ld1 {v1.16b}, [x1] // Obtain the last 128-bit key.
    eor v0.16b, v0.16b, v0.16b // Zeroize v0.
    st1 {v1.4s}, [x0], #16 // Store the last 128-bit key.
    mov w26, #14 // Number of encryption rounds.
    mov w25, #6 // Loop for 7-1 times.
.Lenc_key_256_loop:
    dup v1.4s, v1.s[3] // Repeated four times,The last word of v1 is changed to v1 (128 bits).
    ldr w22, [x23], #4 // Obtains the round constant.
    ext v1.16b, v1.16b, v1.16b, #1 // Byte cycle (RotWord).
    aese v1.16b, v0.16b // XOR then shift then sbox (XOR operation with 0 is itself,
                        // equivalent to omitting the XOR operation).
    dup v2.4s, w22 // Repeat 4 times. w22 becomes v2.
    eor v1.16b, v1.16b, v2.16b // Round constant XOR.
    ld1 {v2.4s}, [x24], #16 // Obtains the 4 words used for XOR.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (1).
    ext v2.16b, v0.16b, v2.16b, #12 // 4321->3210.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (2).
    ext v2.16b, v0.16b, v2.16b, #12 // 3210->2100.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (3).
    ext v2.16b, v0.16b, v2.16b, #12 // 2100->1000.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (4).
    st1 {v1.4s}, [x0], #16 // Stores the newly calculated 4-word key data into the key string.
    subs w25, w25, #1 // Count of 7-1-round key extensions.
    // Second half of the iteration: SubWord without RotWord/Rcon (AES-256 rule).
    dup v1.4s, v1.s[3] // Repeated four times,The last word of v1 is changed to v1 (128 bits).
    ld1 {v2.4s}, [x24], #16 // Obtains the 4 words used for XOR.
    aese v1.16b, v0.16b // XOR then shift then sbox.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (1).
    ext v2.16b, v0.16b, v2.16b, #12 // 4321->3210.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (2).
    ext v2.16b, v0.16b, v2.16b, #12 // 3210->2100.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (3).
    ext v2.16b, v0.16b, v2.16b, #12 // 2100->1000.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (4).
    st1 {v1.4s}, [x0], #16 // Stores the newly calculated 4-word key data into the key string.
    b.ne .Lenc_key_256_loop // Loop jump.
    // Final (15th) round key.
    dup v1.4s, v1.s[3] // Repeated four times,The last word of v1 is changed to v1 (128 bits).
    ldr w22, [x23], #4 // Obtains the round constant.
    ext v1.16b, v1.16b, v1.16b, #1 // Byte cycle (RotWord).
    aese v1.16b, v0.16b // XOR then shift then sbox.
    dup v2.4s, w22 // Repeat 4 times. w22 becomes v2(128bit).
    eor v1.16b, v1.16b, v2.16b // Round constant XOR.
    ld1 {v2.4s}, [x24], #16 // Obtains the 4 words used for XOR.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (1).
    ext v2.16b, v0.16b, v2.16b, #12 // 4321->3210.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (2).
    ext v2.16b, v0.16b, v2.16b, #12 // 3210->2100.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (3).
    ext v2.16b, v0.16b, v2.16b, #12 // 2100->1000.
    eor v1.16b, v1.16b, v2.16b // 4 XOR operation (4).
    st1 {v1.4s}, [x0], #16 // Stores the newly calculated 4-word key data into the key string.
    str w26, [x0] // Fill in the number of rounds.
    eor x24, x24, x24 // Clear sensitivity.
    eor x0, x0, x0
    ldp x21, x22, [sp, #48]
    ldp x23, x24, [sp, #32]
    ldp x25, x26, [sp, #16]
    ldp x29, x30, [sp], #64 // Stacking completed.
    AARCH64_AUTIASP
    ret
.size SetEncryptKey256, .-SetEncryptKey256
/*
* void SetDecryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key);
* Set a decryption key string.
* x0 => CRYPT_AES_Key *ctx; x1 => const uint8_t *key
*/
.globl SetDecryptKey256
.type SetDecryptKey256, %function
.align 5
/*
 * Build the AES-256 decryption schedule: generate the encryption schedule
 * via .Lenc_key_256, then apply InvMixColumns (aesimc) to the interior
 * round keys (all but the first and last) and store them back in place.
 */
SetDecryptKey256:
    AARCH64_PACIASP
    stp x29, x30, [sp, #-0x60]!
    add x29, sp, #0
    stp x25, x28, [sp, #0x10]
    stp d8, d9, [sp, #0x20]
    stp d10, d11, [sp, #0x30]
    stp d12, d13, [sp, #0x40]
    stp d14, d15, [sp, #0x50]
    mov x28, x0
    bl .Lenc_key_256
    mov x25, #-16 // Post-index step for storing keys back-to-front.
    ld1 {v0.4s}, [x28], #16
    SETDECKEY_LDR_9_BLOCK x28
    ld1 {v10.4s}, [x28], #16
    ld1 {v11.4s}, [x28], #16
    ld1 {v12.4s}, [x28], #16
    ld1 {v13.4s}, [x28], #16
    ld1 {v14.4s}, [x28]
    SETDECKEY_INVMIX_9_BLOCK
    aesimc v10.16b, v10.16b
    aesimc v11.16b, v11.16b
    aesimc v12.16b, v12.16b
    aesimc v13.16b, v13.16b
    st1 {v0.4s}, [x28], x25
    SETDECKEY_STR_9_BLOCK x28, x25
    st1 {v10.4s}, [x28], x25
    st1 {v11.4s}, [x28], x25
    st1 {v12.4s}, [x28], x25
    st1 {v13.4s}, [x28], x25
    st1 {v14.4s}, [x28]
    eor x28, x28, x28
    eor x0, x0, x0
    ldp d14, d15, [sp, #0x50]
    ldp d12, d13, [sp, #0x40]
    ldp d10, d11, [sp, #0x30]
    ldp d8, d9, [sp, #0x20]
    ldp x25, x28, [sp, #0x10]
    ldp x29, x30, [sp], #0x60 // Stack has been popped.
    AARCH64_AUTIASP
    ret
.size SetDecryptKey256, .-SetDecryptKey256
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_armv8.S | Unix Assembly | unknown | 17,570 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_CBC)
#include "crypt_arm.h"
#include "crypt_aes_macro_armv8.s"
.file "crypt_aes_cbc_armv8.S"
.text
.arch armv8-a+crypto
KEY .req x0
IN .req x1
OUT .req x2
LEN .req x3
P_IV .req x4
KTMP .req x5
ROUNDS .req w6
BLK0 .req v0
BLK1 .req v1
BLK2 .req v2
BLK3 .req v3
BLK4 .req v4
BLK5 .req v5
BLK6 .req v6
BLK7 .req v7
KEY0_END .req v16
KEY0 .req v17
KEY1 .req v18
KEY2 .req v19
KEY3 .req v20
KEY4 .req v21
KEY5 .req v22
KEY6 .req v23
KEY7 .req v24
KEY8 .req v25
KEY9 .req v26
KEY10 .req v27
KEY11 .req v28
KEY12 .req v29
KEY13 .req v30
KEY14 .req v31
IVENC .req v1
IV0 .req v17
IV1 .req v18
IV2 .req v19
IV3 .req v20
IV4 .req v21
IV5 .req v22
IV6 .req v23
IV7 .req v24
IVT .req v25
RDK0 .req v26
RDK1 .req v27
RDK2 .req v28
/*
 * One full encryption round on a single block:
 * aese = AddRoundKey + SubBytes + ShiftRows, aesmc = MixColumns.
 * block: state register, updated in place.
 * key:   round-key register.
 */
.macro ROUND block, key
aese \block, \key
aesmc \block, \block
.endm
/*
 * One decryption round applied to eight blocks in flight.
 * Each block gets aesd (AddRoundKey + InvShiftRows + InvSubBytes)
 * followed by aesimc (InvMixColumns); the next round key is then
 * post-loaded into \rdk0s so rounds chain by alternating RDK0/RDK1.
 * rdk0s/rdk0: .4s (for ld1) and .16b views of the same round-key register.
 * blk0-blk7:  the eight cipher states (interleaved order hides aesd latency).
 * ktmp:       key-schedule pointer, post-incremented by 16.
 */
.macro DEC8 rdk0s rdk0 blk0 blk1 blk2 blk3 blk4 blk5 blk6 blk7 ktmp
aesd \blk0, \rdk0
aesimc \blk0, \blk0
aesd \blk5, \rdk0
aesimc \blk5, \blk5
aesd \blk1, \rdk0
aesimc \blk1, \blk1
aesd \blk6, \rdk0
aesimc \blk6, \blk6
aesd \blk2, \rdk0
aesimc \blk2, \blk2
aesd \blk3, \rdk0
aesimc \blk3, \blk3
aesd \blk4, \rdk0
aesimc \blk4, \blk4
aesd \blk7, \rdk0
aesimc \blk7, \blk7
ld1 {\rdk0s}, [\ktmp], #16
.endm
/**
 * Function description: AES encrypted assembly acceleration API in CBC mode.
 * int32_t CRYPT_AES_CBC_Encrypt(const CRYPT_AES_Key *ctx,
 *                               const uint8_t *in,
 *                               uint8_t *out,
 *                               uint32_t len,
 *                               uint8_t *iv);
 * Input register:
 *     x0:Pointer to the input key structure
 *     x1:points to the input data address
 *     x2:points to the output data address
 *     x3:Length of the input data, which must be a multiple of 16
 *     x4:Points to the CBC mode mask address
 * Change register:x5, x6, v0-v31
 * Output register:x0
 * Function/Macro Call: None
 *
 * CBC encryption is inherently serial (each block chains on the previous
 * ciphertext), so one block is processed per loop.  The per-size loops use
 * a folding trick: KEY0_END = rk0 ^ rkLast, and each freshly loaded input
 * block is XORed with KEY0_END.  Since the running state before the final
 * AddRoundKey equals CT ^ rkLast, the next aese with that folded value
 * yields (CT ^ in) ^ rk0 — the CBC chain XOR and round-0 AddRoundKey in one
 * step, overlapping key mixing with the aese pipeline.
 *
 * NOTE(review): the first input block is loaded before len is checked, so a
 * len == 0 call would still read 16 bytes and emit one block — presumably
 * callers enforce len >= 16; verify at the call sites.
 */
.globl CRYPT_AES_CBC_Encrypt
.type CRYPT_AES_CBC_Encrypt, %function
CRYPT_AES_CBC_Encrypt:
AARCH64_PACIASP
ld1 {IVENC.16b}, [P_IV] // load IV
ldr w6, [KEY, #240] // load rounds (10/12/14 stored at offset 240 of the key struct)
ld1 {BLK0.16b}, [IN], #16 // load in
ld1 {KEY0.4s, KEY1.4s}, [KEY], #32 // load keys
cmp w6, #12
ld1 {KEY2.4s, KEY3.4s}, [KEY], #32
ld1 {KEY4.4s, KEY5.4s}, [KEY], #32
ld1 {KEY6.4s, KEY7.4s}, [KEY], #32
ld1 {KEY8.4s, KEY9.4s}, [KEY], #32
eor IVENC.16b, IVENC.16b, BLK0.16b // iv + in
b.lt .Laes_cbc_128_start // rounds < 12 -> AES-128
ld1 {KEY10.4s, KEY11.4s}, [KEY], #32
b.eq .Laes_cbc_192_start // rounds == 12 -> AES-192
ld1 {KEY12.4s, KEY13.4s}, [KEY], #32
.Laes_cbc_256_start:
ld1 {KEY14.4s}, [KEY]
ROUND IVENC.16b, KEY0.16b
eor KEY0_END.16b, KEY0.16b, KEY14.16b // key0 + keyEnd
b .Laes_cbc_256_round_loop
.Laes_cbc_256_loop:
ROUND IVENC.16b, KEY0.16b // KEY0 here holds (next_in ^ rk0 ^ rkLast), see header
st1 {BLK0.16b}, [OUT], #16 // store previous ciphertext while rounds run
.Laes_cbc_256_round_loop:
ROUND IVENC.16b, KEY1.16b
ROUND IVENC.16b, KEY2.16b
subs LEN, LEN, #16
ROUND IVENC.16b, KEY3.16b
ROUND IVENC.16b, KEY4.16b
ROUND IVENC.16b, KEY5.16b
ld1 {KEY0.16b}, [IN], #16 // load IN
ROUND IVENC.16b, KEY6.16b
ROUND IVENC.16b, KEY7.16b
ROUND IVENC.16b, KEY8.16b
ROUND IVENC.16b, KEY9.16b
ROUND IVENC.16b, KEY10.16b
ROUND IVENC.16b, KEY11.16b
ROUND IVENC.16b, KEY12.16b
aese IVENC.16b, KEY13.16b // last aese has no MixColumns
eor KEY0.16b, KEY0.16b, KEY0_END.16b // IN + KEY0 + KEYEND
eor BLK0.16b, IVENC.16b, KEY14.16b // final AddRoundKey -> ciphertext
b.gt .Laes_cbc_256_loop
b .Lescbcenc_finish
.Laes_cbc_128_start:
ld1 {KEY10.4s}, [KEY]
ROUND IVENC.16b, KEY0.16b
eor KEY0_END.16b, KEY0.16b, KEY10.16b // key0 + keyEnd
b .Laes_cbc_128_round_loop
.Laes_cbc_128_loop:
ROUND IVENC.16b, KEY0.16b
st1 {BLK0.16b}, [OUT], #16
.Laes_cbc_128_round_loop:
ROUND IVENC.16b, KEY1.16b
ROUND IVENC.16b, KEY2.16b
subs LEN, LEN, #16
ROUND IVENC.16b, KEY3.16b
ROUND IVENC.16b, KEY4.16b
ROUND IVENC.16b, KEY5.16b
ld1 {KEY0.16b}, [IN], #16 // load IN
ROUND IVENC.16b, KEY6.16b
ROUND IVENC.16b, KEY7.16b
ROUND IVENC.16b, KEY8.16b
aese IVENC.16b, KEY9.16b
eor KEY0.16b, KEY0.16b, KEY0_END.16b // IN + KEY0 + KEYEND
eor BLK0.16b, IVENC.16b, KEY10.16b // enc OK
b.gt .Laes_cbc_128_loop
b .Lescbcenc_finish
.Laes_cbc_192_start:
ld1 {KEY12.4s}, [KEY]
ROUND IVENC.16b, KEY0.16b
eor KEY0_END.16b, KEY0.16b, KEY12.16b // key0 + keyEnd
b .Laes_cbc_192_round_loop
.Laes_cbc_192_loop:
ROUND IVENC.16b, KEY0.16b
st1 {BLK0.16b}, [OUT], #16
.Laes_cbc_192_round_loop:
ROUND IVENC.16b, KEY1.16b
ROUND IVENC.16b, KEY2.16b
subs LEN, LEN, #16
ROUND IVENC.16b, KEY3.16b
ROUND IVENC.16b, KEY4.16b
ROUND IVENC.16b, KEY5.16b
ld1 {KEY0.16b}, [IN], #16 // load IN
ROUND IVENC.16b, KEY6.16b
ROUND IVENC.16b, KEY7.16b
ROUND IVENC.16b, KEY8.16b
ROUND IVENC.16b, KEY9.16b
ROUND IVENC.16b, KEY10.16b
aese IVENC.16b, KEY11.16b
eor KEY0.16b, KEY0.16b, KEY0_END.16b // IN + KEY0 + KEYEND
eor BLK0.16b, IVENC.16b, KEY12.16b
b.gt .Laes_cbc_192_loop
.Lescbcenc_finish:
st1 {BLK0.16b}, [OUT], #16 // store the final ciphertext block
st1 {BLK0.16b}, [P_IV] // last ciphertext becomes the next-call IV
mov x0, #0
AARCH64_AUTIASP
ret
.size CRYPT_AES_CBC_Encrypt, .-CRYPT_AES_CBC_Encrypt
/**
 * Function description: AES decryption and assembly acceleration API in CBC mode.
 * int32_t CRYPT_AES_CBC_Decrypt(const CRYPT_AES_Key *ctx,
 *                               const uint8_t *in,
 *                               uint8_t *out,
 *                               uint32_t len,
 *                               uint8_t *iv);
 * Input register:
 *     x0:pointer to the input key structure
 *     x1:points to the input data address
 *     x2:points to the output data address
 *     x3:Length of the input data, which must be a multiple of 16
 *     x4:Points to the CBC mode mask address
 * Change register:x5, x6, v0-v31
 * Output register:x0
 * Function/Macro Call: AES_DEC_8_BLKS, AES_DEC_1_BLK, AES_DEC_2_BLKS, AES_DEC_3_BLKS,
 *                      AES_DEC_4_BLKS, AES_DEC_5_BLKS, AES_DEC_6_BLKS, AES_DEC_7_BLKS
 *
 * CBC decryption parallelizes: out[i] = D(in[i]) ^ in[i-1].  Lengths >= 128
 * bytes go through an 8-block pipeline built on DEC8; shorter tails dispatch
 * to 1..7-block helpers.  In the 8-block path the ciphertext blocks are
 * copied into IV1..IV7/IVT before decryption so the chaining values survive,
 * and the final AddRoundKey (RDK2, the last schedule entry) is folded into
 * the chaining XOR via `eor IVx, IVx, RDK2` instead of a separate round.
 *
 * NOTE(review): only RDK0-RDK2 are scrubbed on exit; BLK/IV registers still
 * hold plaintext/ciphertext when the function returns — confirm whether that
 * matches the project's register-sanitization policy (the x86-64 variant
 * clears its BLK registers).
 */
.globl CRYPT_AES_CBC_Decrypt
.type CRYPT_AES_CBC_Decrypt, %function
CRYPT_AES_CBC_Decrypt:
AARCH64_PACIASP
ld1 {IV0.16b}, [P_IV]
.Lcbc_aesdec_start:
cmp LEN, #64 // dispatch on remaining length (in 16-byte blocks)
b.ge .Lcbc_dec_above_equal_4_blks
cmp LEN, #32
b.ge .Lcbc_dec_above_equal_2_blks
cmp LEN, #0
b.eq .Lcbc_aesdec_finish
b .Lcbc_dec_proc_1_blk
.Lcbc_dec_above_equal_2_blks:
cmp LEN, #48
b.lt .Lcbc_dec_proc_2_blks
b .Lcbc_dec_proc_3_blks
.Lcbc_dec_above_equal_4_blks:
cmp LEN, #96
b.ge .Lcbc_dec_above_equal_6_blks
cmp LEN, #80
b.lt .Lcbc_dec_proc_4_blks
b .Lcbc_dec_proc_5_blks
.Lcbc_dec_above_equal_6_blks:
cmp LEN, #112
b.lt .Lcbc_dec_proc_6_blks
cmp LEN, #128
b.lt .Lcbc_dec_proc_7_blks
.align 4
.Lcbc_aesdec_8_blks_loop:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
mov KTMP, KEY
ldr ROUNDS, [KEY, #240]
ld1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [IN], #64
mov IV1.16b, BLK0.16b // keep ciphertext copies: needed as chaining values
mov IV2.16b, BLK1.16b
mov IV3.16b, BLK2.16b
ld1 {RDK0.4s, RDK1.4s}, [KTMP], #32 // round keys 0 and 1
mov IV4.16b, BLK3.16b
mov IV5.16b, BLK4.16b
mov IV6.16b, BLK5.16b
mov IV7.16b, BLK6.16b
mov IVT.16b, BLK7.16b // last ciphertext: next iteration's IV
DEC8 RDK0.4s, RDK0.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK1.4s, RDK1.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK0.4s, RDK0.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK1.4s, RDK1.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK0.4s, RDK0.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK1.4s, RDK1.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK0.4s, RDK0.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK1.4s, RDK1.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
cmp ROUNDS, #12 // AES-128 done after 8 chained rounds + tail below
b.lt .Ldec_8_blks_last
DEC8 RDK0.4s, RDK0.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK1.4s, RDK1.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
b.eq .Ldec_8_blks_last // AES-192
DEC8 RDK0.4s, RDK0.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
DEC8 RDK1.4s, RDK1.16b, BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b, BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b, KTMP
.Ldec_8_blks_last:
ld1 {RDK2.4s}, [KTMP] // last round key, folded into the IV XORs below
aesd BLK0.16b, RDK0.16b
aesimc BLK0.16b, BLK0.16b
aesd BLK1.16b, RDK0.16b
aesimc BLK1.16b, BLK1.16b
aesd BLK2.16b, RDK0.16b
aesimc BLK2.16b, BLK2.16b
eor IV0.16b, IV0.16b, RDK2.16b // pre-mix chaining value with the final round key
aesd BLK3.16b, RDK0.16b
aesimc BLK3.16b, BLK3.16b
eor IV1.16b, IV1.16b, RDK2.16b
aesd BLK4.16b, RDK0.16b
aesimc BLK4.16b, BLK4.16b
eor IV2.16b, IV2.16b, RDK2.16b
aesd BLK5.16b, RDK0.16b
aesimc BLK5.16b, BLK5.16b
eor IV3.16b, IV3.16b, RDK2.16b
aesd BLK6.16b, RDK0.16b
aesimc BLK6.16b, BLK6.16b
eor IV4.16b, IV4.16b, RDK2.16b
aesd BLK7.16b, RDK0.16b
aesimc BLK7.16b, BLK7.16b
eor IV5.16b, IV5.16b, RDK2.16b
aesd BLK0.16b, RDK1.16b // final aesd has no InvMixColumns
aesd BLK1.16b, RDK1.16b
eor IV6.16b, IV6.16b, RDK2.16b
aesd BLK2.16b, RDK1.16b
aesd BLK3.16b, RDK1.16b
eor IV7.16b, IV7.16b, RDK2.16b
aesd BLK4.16b, RDK1.16b
aesd BLK5.16b, RDK1.16b
aesd BLK6.16b, RDK1.16b
aesd BLK7.16b, RDK1.16b
sub LEN, LEN, #128
eor BLK0.16b, BLK0.16b, IV0.16b // (last AddRoundKey) ^ chaining value in one XOR
eor BLK1.16b, BLK1.16b, IV1.16b
eor BLK2.16b, BLK2.16b, IV2.16b
eor BLK3.16b, BLK3.16b, IV3.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
eor BLK4.16b, BLK4.16b, IV4.16b
eor BLK5.16b, BLK5.16b, IV5.16b
cmp LEN, #0
eor BLK6.16b, BLK6.16b, IV6.16b
eor BLK7.16b, BLK7.16b, IV7.16b
mov IV0.16b, IVT.16b // carry the last ciphertext forward as the IV
st1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [OUT], #64
b.eq .Lcbc_aesdec_finish
cmp LEN, #128
b.lt .Lcbc_aesdec_start // < 8 blocks left: re-dispatch to the tail handlers
b .Lcbc_aesdec_8_blks_loop
.Lcbc_dec_proc_1_blk:
ld1 {BLK0.16b}, [IN]
AES_DEC_1_BLK KEY BLK0.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
eor BLK0.16b, BLK0.16b, IV0.16b
ld1 {IV0.16b}, [IN] // reload ciphertext as the new IV before storing out
st1 {BLK0.16b}, [OUT]
b .Lcbc_aesdec_finish
.Lcbc_dec_proc_2_blks:
ld1 {BLK0.16b, BLK1.16b}, [IN]
ld1 {IV1.16b}, [IN], #16
AES_DEC_2_BLKS KEY BLK0.16b BLK1.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
eor BLK0.16b, BLK0.16b, IV0.16b
eor BLK1.16b, BLK1.16b, IV1.16b
ld1 {IV0.16b}, [IN]
st1 {BLK0.16b, BLK1.16b}, [OUT]
b .Lcbc_aesdec_finish
.Lcbc_dec_proc_3_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b}, [IN]
ld1 {IV1.16b, IV2.16b}, [IN], #32
AES_DEC_3_BLKS KEY BLK0.16b BLK1.16b BLK2.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
eor BLK0.16b, BLK0.16b, IV0.16b
eor BLK1.16b, BLK1.16b, IV1.16b
eor BLK2.16b, BLK2.16b, IV2.16b
ld1 {IV0.16b}, [IN]
st1 {BLK0.16b, BLK1.16b, BLK2.16b}, [OUT]
b .Lcbc_aesdec_finish
.Lcbc_dec_proc_4_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN]
ld1 {IV1.16b, IV2.16b, IV3.16b}, [IN], #48
AES_DEC_4_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
eor BLK0.16b, BLK0.16b, IV0.16b
eor BLK1.16b, BLK1.16b, IV1.16b
eor BLK2.16b, BLK2.16b, IV2.16b
eor BLK3.16b, BLK3.16b, IV3.16b
ld1 {IV0.16b}, [IN]
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT]
b .Lcbc_aesdec_finish
.Lcbc_dec_proc_5_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN]
ld1 {IV1.16b, IV2.16b, IV3.16b, IV4.16b}, [IN], #64
ld1 {BLK4.16b}, [IN]
AES_DEC_5_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
eor BLK0.16b, BLK0.16b, IV0.16b
eor BLK1.16b, BLK1.16b, IV1.16b
eor BLK2.16b, BLK2.16b, IV2.16b
eor BLK3.16b, BLK3.16b, IV3.16b
eor BLK4.16b, BLK4.16b, IV4.16b
ld1 {IV0.16b}, [IN]
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b}, [OUT]
b .Lcbc_aesdec_finish
.Lcbc_dec_proc_6_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN]
ld1 {IV1.16b, IV2.16b, IV3.16b, IV4.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b}, [IN]
ld1 {IV5.16b}, [IN], #16
AES_DEC_6_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b BLK5.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
eor BLK0.16b, BLK0.16b, IV0.16b
eor BLK1.16b, BLK1.16b, IV1.16b
eor BLK2.16b, BLK2.16b, IV2.16b
eor BLK3.16b, BLK3.16b, IV3.16b
eor BLK4.16b, BLK4.16b, IV4.16b
eor BLK5.16b, BLK5.16b, IV5.16b
ld1 {IV0.16b}, [IN]
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b}, [OUT]
b .Lcbc_aesdec_finish
.Lcbc_dec_proc_7_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN]
ld1 {IV1.16b, IV2.16b, IV3.16b, IV4.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b}, [IN]
ld1 {IV5.16b, IV6.16b}, [IN], #32
AES_DEC_7_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b BLK5.16b BLK6.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
eor BLK0.16b, BLK0.16b, IV0.16b
eor BLK1.16b, BLK1.16b, IV1.16b
eor BLK2.16b, BLK2.16b, IV2.16b
eor BLK3.16b, BLK3.16b, IV3.16b
eor BLK4.16b, BLK4.16b, IV4.16b
eor BLK5.16b, BLK5.16b, IV5.16b
eor BLK6.16b, BLK6.16b, IV6.16b
ld1 {IV0.16b}, [IN]
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b}, [OUT]
.Lcbc_aesdec_finish:
st1 {IV0.16b}, [P_IV] // persist updated IV for the next call
mov x0, #0
eor RDK0.16b, RDK0.16b, RDK0.16b // scrub round-key material from registers
eor RDK1.16b, RDK1.16b, RDK1.16b
eor RDK2.16b, RDK2.16b, RDK2.16b
AARCH64_AUTIASP
ret
.size CRYPT_AES_CBC_Decrypt, .-CRYPT_AES_CBC_Decrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_cbc_armv8.S | Unix Assembly | unknown | 15,260 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_CBC)
#include "crypt_aes_macro_x86_64.s"
.file "crypt_aes_cbc_x86_64.S"
.text
.set ARG1, %rdi
.set ARG2, %rsi
.set ARG3, %rdx
.set ARG4, %ecx
.set ARG5, %r8
.set ARG6, %r9
.set RDK, %xmm3
.set KEY, %rdi
.set KTMP, %r9
.set ROUNDS, %eax
.set RET, %eax
.set BLK0, %xmm1
.set BLK1, %xmm4
.set BLK2, %xmm5
.set BLK3, %xmm6
.set BLK4, %xmm10
.set BLK5, %xmm11
.set BLK6, %xmm12
.set BLK7, %xmm13
.set IV0, %xmm0
.set IV1, %xmm7
.set IV2, %xmm8
.set IV3, %xmm9
.set KEY1, %xmm4
.set KEY2, %xmm5
.set KEY3, %xmm6
.set KEY4, %xmm10
.set KEY5, %xmm11
.set KEY6, %xmm12
.set KEY7, %xmm13
.set KEY8, %xmm14
.set KEY9, %xmm15
.set KEY10, %xmm2
.set KEY11, %xmm7
.set KEY12, %xmm8
.set KEY13, %xmm9
.set KEYTEMP, %xmm3
/**
 * Function description:AES encrypted assembly acceleration API in CBC mode.
 * Function prototype:int32_t CRYPT_AES_CBC_Encrypt(const CRYPT_AES_Key *ctx,
 *                                                  const uint8_t *in,
 *                                                  uint8_t *out,
 *                                                  uint32_t len,
 *                                                  uint8_t *iv);
 * Input register:
 *     rdi:pointer to the input key structure
 *     rsi:points to the input data address
 *     rdx:points to the output data address
 *     rcx:Length of the input data, which must be a multiple of 16
 *     r8: Points to the CBC mode mask address
 * Change register:xmm0-xmm15
 * Output register:eax
 * Function/Macro Call: None
 *
 * CBC encryption is serial, so one block is processed per loop iteration
 * using fully unrolled AESENC rounds.  The whole key schedule is kept in
 * xmm registers (KEY1..KEY13 plus KEYTEMP for AES-256's two extra keys).
 * Key-holding registers are zeroed before returning.
 */
.globl CRYPT_AES_CBC_Encrypt
.type CRYPT_AES_CBC_Encrypt, @function
CRYPT_AES_CBC_Encrypt:
.cfi_startproc
.align 16
cmpl $16, ARG4
jb .Laescbcend_end // len < 16: nothing to do, return 0
movl 240(KEY), ROUNDS // rounds (10/12/14) stored at offset 240
vmovdqu (ARG5), IV0
vmovdqu (KEY), KEY1 // round key 0
vmovdqu 16(KEY), KEY2
vmovdqu 32(KEY), KEY3
vmovdqu 48(KEY), KEY4
vmovdqu 64(KEY), KEY5
vmovdqu 80(KEY), KEY6
vmovdqu 96(KEY), KEY7
vmovdqu 112(KEY), KEY8
vmovdqu 128(KEY), KEY9
vmovdqu 144(KEY), KEY10
vmovdqu 160(KEY), KEY11
cmpl $12, ROUNDS
jb .Laes_128_cbc_start
je .Laes_192_cbc_start
.align 16
.Laes_256_cbc_start:
vmovdqu 176(KEY), KEY12
vmovdqu 192(KEY), KEY13
.Laes_256_cbc_loop:
vpxor (ARG2), IV0, BLK0 // CBC chain: in ^ iv
vmovdqu 208(KEY), KEYTEMP // KEYTEMP is shared, reload round key 13 each pass
vpxor BLK0, KEY1, BLK0 // round-0 AddRoundKey
aesenc KEY2, BLK0
aesenc KEY3, BLK0
aesenc KEY4, BLK0
aesenc KEY5, BLK0
aesenc KEY6, BLK0
aesenc KEY7, BLK0
aesenc KEY8, BLK0
aesenc KEY9, BLK0
aesenc KEY10, BLK0
aesenc KEY11, BLK0
aesenc KEY12, BLK0
aesenc KEY13, BLK0
aesenc KEYTEMP, BLK0
vmovdqu 224(KEY), KEYTEMP // final round key
aesenclast KEYTEMP, BLK0
leaq 16(ARG2), ARG2
vmovdqu BLK0, (ARG3)
movdqa BLK0, IV0 // ciphertext becomes the next IV
leaq 16(ARG3), ARG3
subl $16, ARG4
cmpl $16, ARG4
jnb .Laes_256_cbc_loop // Special value processing
vpxor KEY12, KEY12, KEY12 // scrub the AES-256-only key registers
vpxor KEY13, KEY13, KEY13
vpxor KEYTEMP, KEYTEMP, KEYTEMP
jmp .Laescbcenc_finish
.align 16
.Laes_192_cbc_start:
vmovdqu 176(KEY), KEY12
vmovdqu 192(KEY), KEY13
.Laes_192_cbc_loop:
vpxor (ARG2), IV0, BLK0
vpxor BLK0, KEY1, BLK0
aesenc KEY2, BLK0
aesenc KEY3, BLK0
aesenc KEY4, BLK0
aesenc KEY5, BLK0
aesenc KEY6, BLK0
aesenc KEY7, BLK0
aesenc KEY8, BLK0
aesenc KEY9, BLK0
aesenc KEY10, BLK0
aesenc KEY11, BLK0
aesenc KEY12, BLK0
aesenclast KEY13, BLK0
leaq 16(ARG2), ARG2
vmovdqu BLK0, (ARG3)
movdqa BLK0, IV0
leaq 16(ARG3), ARG3
subl $16 , ARG4
jnz .Laes_192_cbc_loop
vpxor KEY12, KEY12, KEY12
vpxor KEY13, KEY13, KEY13
jmp .Laescbcenc_finish
.align 16
.Laes_128_cbc_start:
vpxor (ARG2), IV0, BLK0
vpxor BLK0, KEY1, BLK0
aesenc KEY2, BLK0
aesenc KEY3, BLK0
aesenc KEY4, BLK0
aesenc KEY5, BLK0
aesenc KEY6, BLK0
aesenc KEY7, BLK0
aesenc KEY8, BLK0
aesenc KEY9, BLK0
aesenc KEY10, BLK0
aesenclast KEY11, BLK0
leaq 16(ARG2), ARG2
vmovdqu BLK0, (ARG3)
movdqa BLK0, IV0
leaq 16(ARG3), ARG3
subl $16, ARG4
jnz .Laes_128_cbc_start
jmp .Laescbcenc_finish
.Laescbcenc_finish:
vmovdqu BLK0,(ARG5) // write back the final ciphertext block as the IV
vpxor KEY1, KEY1, KEY1 // scrub the key schedule from registers
vpxor KEY2, KEY2, KEY2
vpxor KEY3, KEY3, KEY3
vpxor KEY4, KEY4, KEY4
vpxor KEY5, KEY5, KEY5
vpxor KEY6, KEY6, KEY6
vpxor KEY7, KEY7, KEY7
vpxor KEY8, KEY8, KEY8
vpxor KEY9, KEY9, KEY9
vpxor KEY10, KEY10, KEY10
vpxor KEY11, KEY11, KEY11
.Laescbcend_end:
movl $0, RET
ret
.cfi_endproc
.size CRYPT_AES_CBC_Encrypt, .-CRYPT_AES_CBC_Encrypt
/**
 * Function description: Sets the AES decryption and assembly accelerated implementation interface in CBC mode
 * Function prototype:int32_t CRYPT_AES_CBC_Decrypt(const CRYPT_AES_Key *ctx,
 *                                                  const uint8_t *in,
 *                                                  uint8_t *out,
 *                                                  uint32_t len,
 *                                                  uint8_t *iv);
 * Input register:
 *     rdi:pointer to the input key structure
 *     rsi:points to the input data address.
 *     rdx:points to the output data address.
 *     rcx:Length of the input data, which must be a multiple of 16
 *     r8: Points to the CBC mode mask address
 * Change register:xmm0-xmm13
 * Output register:eax
 * Function/Macro Call: None
 *
 * CBC decryption parallelizes: out[i] = D(in[i]) ^ in[i-1].  Lengths >= 128
 * bytes use an 8-block AESDEC pipeline (AES_DEC_8_BLKS); shorter tails
 * dispatch to 1..7-block helpers.  Because only IV0..IV3 registers are
 * available to hold chaining values, blocks 4..7 XOR their chaining
 * ciphertext straight from memory (e.g. 48(ARG2)) after decryption.
 */
.globl CRYPT_AES_CBC_Decrypt
.type CRYPT_AES_CBC_Decrypt, @function
CRYPT_AES_CBC_Decrypt:
.cfi_startproc
.align 16
vmovdqu (ARG5), IV0
.Laes_cbc_dec_start:
cmpl $64, ARG4 // dispatch on remaining length
jae .Labove_equal_4_blks
cmpl $32, ARG4
jae .Labove_equal_2_blks
cmpl $0, ARG4
je .Laes_cbc_dec_finish
jmp .Lproc_1_blk
.Labove_equal_2_blks:
cmpl $48, ARG4
jb .Lproc_2_blks
jmp .Lproc_3_blks
.Labove_equal_4_blks:
cmpl $96, ARG4
jae .Labove_equal_6_blks
cmpl $80, ARG4
jb .Lproc_4_blks
jmp .Lproc_5_blks
.Labove_equal_6_blks:
cmpl $112, ARG4
jb .Lproc_6_blks
cmpl $128, ARG4
jb .Lproc_7_blks
.align 16
.Lproc_8_blks:
.Laescbcdec_8_blks_loop:
vmovdqu (ARG2), BLK0
vmovdqu 16(ARG2), BLK1
vmovdqu 32(ARG2), BLK2
movdqa BLK0, IV1 // keep ciphertext copies as chaining values for blocks 1-3
movdqa BLK1, IV2
movdqa BLK2, IV3
movq KEY, KTMP
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor BLK0, RDK, BLK0 // round-0 AddRoundKey for all 8 blocks
vpxor BLK1, RDK, BLK1
vpxor BLK2, RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
vpxor 96(ARG2), RDK, BLK6
vpxor 112(ARG2), RDK, BLK7
decl ROUNDS
AES_DEC_8_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5 BLK6 BLK7
vpxor BLK0, IV0, BLK0 // CBC chain XOR; blocks 4-7 reread ciphertext from memory
vpxor BLK1, IV1, BLK1
vpxor BLK2, IV2, BLK2
vpxor BLK3, IV3, BLK3
vpxor 48(ARG2), BLK4, BLK4
vpxor 64(ARG2), BLK5, BLK5
vpxor 80(ARG2), BLK6, BLK6
vpxor 96(ARG2), BLK7, BLK7
vmovdqu 112(ARG2), IV0 // last ciphertext becomes the next IV
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
vmovdqu BLK6, 96(ARG3)
vmovdqu BLK7, 112(ARG3)
subl $128, ARG4
leaq 128(ARG2), ARG2
leaq 128(ARG3), ARG3
cmpl $128, ARG4
jb .Laes_cbc_dec_start // tail (< 8 blocks): re-dispatch
jmp .Laescbcdec_8_blks_loop
.align 16
.Lproc_1_blk:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
decl ROUNDS
AES_DEC_1_BLK KEY ROUNDS RDK BLK0
vpxor BLK0, IV0, BLK0
vmovdqu (ARG2), IV0 // reload ciphertext as the new IV before storing out
vmovdqu BLK0, (ARG3)
jmp .Laes_cbc_dec_finish
.align 16
.Lproc_2_blks:
vmovdqu (ARG2), BLK0
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
movdqa BLK0, IV1
vpxor BLK0, RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
decl ROUNDS
AES_DEC_2_BLKS KEY ROUNDS RDK BLK0 BLK1
vpxor BLK0, IV0, BLK0
vpxor BLK1, IV1, BLK1
vmovdqu 16(ARG2), IV0
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
jmp .Laes_cbc_dec_finish
.align 16
.Lproc_3_blks:
vmovdqu (ARG2), BLK0
vmovdqu 16(ARG2), BLK1
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
movdqa BLK0, IV1
movdqa BLK1, IV2
vpxor BLK0, RDK, BLK0
vpxor BLK1, RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
decl ROUNDS
AES_DEC_3_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2
vpxor BLK0, IV0, BLK0
vpxor BLK1, IV1, BLK1
vpxor BLK2, IV2, BLK2
vmovdqu 32(ARG2), IV0
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
jmp .Laes_cbc_dec_finish
.align 16
.Lproc_4_blks:
vmovdqu (ARG2), BLK0
vmovdqu 16(ARG2), BLK1
vmovdqu 32(ARG2), BLK2
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
movdqa BLK0, IV1
movdqa BLK1, IV2
movdqa BLK2, IV3
vpxor BLK0, RDK, BLK0
vpxor BLK1, RDK, BLK1
vpxor BLK2, RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
decl ROUNDS
AES_DEC_4_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3
vpxor BLK0, IV0, BLK0
vpxor BLK1, IV1, BLK1
vpxor BLK2, IV2, BLK2
vpxor BLK3, IV3, BLK3
vmovdqu 48(ARG2), IV0
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
jmp .Laes_cbc_dec_finish
.align 16
.Lproc_5_blks:
vmovdqu (ARG2), BLK0
vmovdqu 16(ARG2), BLK1
vmovdqu 32(ARG2), BLK2
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
movdqa BLK0, IV1
movdqa BLK1, IV2
movdqa BLK2, IV3
vpxor BLK0, RDK, BLK0
vpxor BLK1, RDK, BLK1
vpxor BLK2, RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
decl ROUNDS
AES_DEC_5_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4
vpxor BLK0, IV0, BLK0
vpxor BLK1, IV1, BLK1
vpxor BLK2, IV2, BLK2
vpxor BLK3, IV3, BLK3
vpxor 48(ARG2), BLK4, BLK4 // block 4 chains on in[3], read from memory
vmovdqu 64(ARG2), IV0
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
jmp .Laes_cbc_dec_finish
.align 16
.Lproc_6_blks:
vmovdqu (ARG2), BLK0
vmovdqu 16(ARG2), BLK1
vmovdqu 32(ARG2), BLK2
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
movdqa BLK0, IV1
movdqa BLK1, IV2
movdqa BLK2, IV3
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
decl ROUNDS
AES_DEC_6_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5
vpxor BLK0, IV0, BLK0
vpxor BLK1, IV1, BLK1
vpxor BLK2, IV2, BLK2
vpxor BLK3, IV3, BLK3
vpxor 48(ARG2), BLK4, BLK4
vpxor 64(ARG2), BLK5, BLK5
vmovdqu 80(ARG2), IV0
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
jmp .Laes_cbc_dec_finish
.align 16
.Lproc_7_blks:
vmovdqu (ARG2), BLK0
vmovdqu 16(ARG2), BLK1
vmovdqu 32(ARG2), BLK2
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
movdqa BLK0, IV1
movdqa BLK1, IV2
movdqa BLK2, IV3
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
vpxor 96(ARG2), RDK, BLK6
decl ROUNDS
AES_DEC_7_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5 BLK6
vpxor BLK0, IV0, BLK0
vpxor BLK1, IV1, BLK1
vpxor BLK2, IV2, BLK2
vpxor BLK3, IV3, BLK3
vpxor 48(ARG2), BLK4, BLK4
vpxor 64(ARG2), BLK5, BLK5
vpxor 80(ARG2), BLK6, BLK6
vmovdqu 96(ARG2), IV0
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
vmovdqu BLK6, 96(ARG3)
.align 16
.Laes_cbc_dec_finish:
vmovdqu IV0, (ARG5) // persist updated IV for the next call
vpxor BLK0, BLK0, BLK0 // scrub plaintext and key material from registers
vpxor BLK1, BLK1, BLK1
vpxor BLK2, BLK2, BLK2
vpxor BLK3, BLK3, BLK3
vpxor BLK4, BLK4, BLK4
vpxor BLK5, BLK5, BLK5
vpxor BLK6, BLK6, BLK6
vpxor BLK7, BLK7, BLK7
vpxor RDK, RDK, RDK
movl $0, RET
ret
.cfi_endproc
.size CRYPT_AES_CBC_Decrypt, .-CRYPT_AES_CBC_Decrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_cbc_x86_64.S | Unix Assembly | unknown | 12,757 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_CFB)
#include "crypt_arm.h"
#include "crypt_aes_macro_armv8.s"
.file "crypt_aes_cfb_armv8.S"
.text
.arch armv8-a+crypto
.align 5
KEY .req x0
IN .req x1
OUT .req x2
LEN .req x3
IV .req x4
LTMP .req x12
IVC .req v19
CT1 .req v20
CT2 .req v21
CT3 .req v22
CT4 .req v23
CT5 .req v24
CT6 .req v25
CT7 .req v26
CT8 .req v27
BLK0 .req v0
BLK1 .req v1
BLK2 .req v2
BLK3 .req v3
BLK4 .req v4
BLK5 .req v5
BLK6 .req v6
BLK7 .req v7
RDK0 .req v17
RDK1 .req v18
ROUNDS .req w6
/*
 * int32_t CRYPT_AES_CFB_Decrypt(const CRYPT_AES_Key *ctx,
 *                               const uint8_t *in,
 *                               uint8_t *out,
 *                               uint32_t len,
 *                               uint8_t *iv);
 * CFB decryption: keystream[i] = AES-Encrypt(ciphertext[i-1]) (IV for i==0),
 * plaintext[i] = ciphertext[i] ^ keystream[i].  Because the chaining input
 * is the (already known) ciphertext, up to 8 blocks are encrypted in
 * parallel.  len is expected to be a multiple of 16; the updated IV (last
 * ciphertext block) is written back through x4.  Returns 0 in x0.
 *
 * Fix: the register scrub at the end previously cleared CT1-CT6 only;
 * CT7 and CT8 also hold keystream blocks after the 7/8-block paths and
 * are now zeroized as well.
 */
.globl CRYPT_AES_CFB_Decrypt
.type CRYPT_AES_CFB_Decrypt, %function
CRYPT_AES_CFB_Decrypt:
AARCH64_PACIASP
ld1 {IVC.16b}, [IV] // Load the IV
mov LTMP, LEN
.Lcfb_aesdec_start:
cmp LTMP, #64 // dispatch on remaining length (16-byte blocks)
b.ge .Lcfb_dec_above_equal_4_blks
cmp LTMP, #32
b.ge .Lcfb_dec_above_equal_2_blks
cmp LTMP, #0
b.eq .Lcfb_len_zero
b .Lcfb_dec_proc_1_blk
.Lcfb_dec_above_equal_2_blks:
cmp LTMP, #48
b.lt .Lcfb_dec_proc_2_blks
b .Lcfb_dec_proc_3_blks
.Lcfb_dec_above_equal_4_blks:
cmp LTMP, #96
b.ge .Lcfb_dec_above_equal_6_blks
cmp LTMP, #80
b.lt .Lcfb_dec_proc_4_blks
b .Lcfb_dec_proc_5_blks
.Lcfb_dec_above_equal_6_blks:
cmp LTMP, #112
b.lt .Lcfb_dec_proc_6_blks
cmp LTMP, #128
b.lt .Lcfb_dec_proc_7_blks
.Lcfb_dec_proc_8_blks:
/* When the length is greater than or equal to 128, eight blocks loop is used */
.Lcfb_aesdec_8_blks_loop:
/* Compute 8 CBF Decryption */
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [IN], #64
mov CT1.16b, IVC.16b // Prevent the IV or BLK from being changed
mov CT2.16b, BLK0.16b // chaining inputs: IV then the 7 preceding ciphertexts
mov CT3.16b, BLK1.16b
mov CT4.16b, BLK2.16b
mov CT5.16b, BLK3.16b
mov CT6.16b, BLK4.16b
mov CT7.16b, BLK5.16b
mov CT8.16b, BLK6.16b
mov x14, KEY // Prevent the key from being changed
AES_ENC_8_BLKS x14 CT1.16b CT2.16b CT3.16b CT4.16b CT5.16b \
CT6.16b CT7.16b CT8.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
mov IVC.16b, BLK7.16b // Prepares for the next loop or update
eor BLK0.16b, BLK0.16b, CT1.16b // plaintext = ciphertext ^ keystream
eor BLK1.16b, BLK1.16b, CT2.16b
eor BLK2.16b, BLK2.16b, CT3.16b
eor BLK3.16b, BLK3.16b, CT4.16b
eor BLK4.16b, BLK4.16b, CT5.16b
eor BLK5.16b, BLK5.16b, CT6.16b
eor BLK6.16b, BLK6.16b, CT7.16b
eor BLK7.16b, BLK7.16b, CT8.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [OUT], #64
sub LTMP, LTMP, #128
cmp LTMP, #0
b.eq .Lcfb_aesdec_finish
cmp LTMP, #128
b.lt .Lcfb_aesdec_start // tail (< 8 blocks): re-dispatch
b .Lcfb_aesdec_8_blks_loop
.Lcfb_dec_proc_1_blk:
ld1 {BLK0.16b}, [IN]
mov CT1.16b, IVC.16b
AES_ENC_1_BLK KEY CT1.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
mov IVC.16b, BLK0.16b
eor BLK0.16b, CT1.16b, BLK0.16b
st1 {BLK0.16b}, [OUT]
b .Lcfb_aesdec_finish
.Lcfb_dec_proc_2_blks:
ld1 {BLK0.16b, BLK1.16b}, [IN]
mov CT1.16b, IVC.16b
mov CT2.16b, BLK0.16b
AES_ENC_2_BLKS KEY CT1.16b CT2.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
mov IVC.16b, BLK1.16b
eor BLK0.16b, CT1.16b, BLK0.16b
eor BLK1.16b, CT2.16b, BLK1.16b
st1 {BLK0.16b, BLK1.16b}, [OUT]
b .Lcfb_aesdec_finish
.Lcfb_dec_proc_3_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b}, [IN]
mov CT1.16b, IVC.16b
mov CT2.16b, BLK0.16b
mov CT3.16b, BLK1.16b
AES_ENC_3_BLKS KEY CT1.16b CT2.16b CT3.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
mov IVC.16b, BLK2.16b
eor BLK0.16b, BLK0.16b, CT1.16b
eor BLK1.16b, BLK1.16b, CT2.16b
eor BLK2.16b, BLK2.16b, CT3.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b}, [OUT]
b .Lcfb_aesdec_finish
.Lcfb_dec_proc_4_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN]
mov CT1.16b, IVC.16b
mov CT2.16b, BLK0.16b
mov CT3.16b, BLK1.16b
mov CT4.16b, BLK2.16b
AES_ENC_4_BLKS KEY CT1.16b CT2.16b CT3.16b CT4.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
mov IVC.16b, BLK3.16b
eor BLK0.16b, BLK0.16b, CT1.16b
eor BLK1.16b, BLK1.16b, CT2.16b
eor BLK2.16b, BLK2.16b, CT3.16b
eor BLK3.16b, BLK3.16b, CT4.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT]
b .Lcfb_aesdec_finish
.Lcfb_dec_proc_5_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b}, [IN]
mov CT1.16b, IVC.16b
mov CT2.16b, BLK0.16b
mov CT3.16b, BLK1.16b
mov CT4.16b, BLK2.16b
mov CT5.16b, BLK3.16b
AES_ENC_5_BLKS KEY CT1.16b CT2.16b CT3.16b CT4.16b CT5.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
mov IVC.16b, BLK4.16b
eor BLK0.16b, BLK0.16b, CT1.16b
eor BLK1.16b, BLK1.16b, CT2.16b
eor BLK2.16b, BLK2.16b, CT3.16b
eor BLK3.16b, BLK3.16b, CT4.16b
eor BLK4.16b, BLK4.16b, CT5.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b}, [OUT]
b .Lcfb_aesdec_finish
.Lcfb_dec_proc_6_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b}, [IN]
mov CT1.16b, IVC.16b
mov CT2.16b, BLK0.16b
mov CT3.16b, BLK1.16b
mov CT4.16b, BLK2.16b
mov CT5.16b, BLK3.16b
mov CT6.16b, BLK4.16b
AES_ENC_6_BLKS KEY CT1.16b CT2.16b CT3.16b CT4.16b CT5.16b CT6.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
mov IVC.16b, BLK5.16b
eor BLK0.16b, BLK0.16b, CT1.16b
eor BLK1.16b, BLK1.16b, CT2.16b
eor BLK2.16b, BLK2.16b, CT3.16b
eor BLK3.16b, BLK3.16b, CT4.16b
eor BLK4.16b, BLK4.16b, CT5.16b
eor BLK5.16b, BLK5.16b, CT6.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b}, [OUT]
b .Lcfb_aesdec_finish
.Lcfb_dec_proc_7_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b}, [IN]
mov CT1.16b, IVC.16b
mov CT2.16b, BLK0.16b
mov CT3.16b, BLK1.16b
mov CT4.16b, BLK2.16b
mov CT5.16b, BLK3.16b
mov CT6.16b, BLK4.16b
mov CT7.16b, BLK5.16b
AES_ENC_7_BLKS KEY CT1.16b CT2.16b CT3.16b CT4.16b CT5.16b CT6.16b CT7.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
mov IVC.16b, BLK6.16b
eor BLK0.16b, BLK0.16b, CT1.16b
eor BLK1.16b, BLK1.16b, CT2.16b
eor BLK2.16b, BLK2.16b, CT3.16b
eor BLK3.16b, BLK3.16b, CT4.16b
eor BLK4.16b, BLK4.16b, CT5.16b
eor BLK5.16b, BLK5.16b, CT6.16b
eor BLK6.16b, BLK6.16b, CT7.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b}, [OUT]
.Lcfb_aesdec_finish:
st1 {IVC.16b}, [IV] // persist updated IV for the next call
.Lcfb_len_zero:
mov x0, #0
eor CT1.16b, CT1.16b, CT1.16b // scrub keystream material from registers
eor CT2.16b, CT2.16b, CT2.16b
eor CT3.16b, CT3.16b, CT3.16b
eor CT4.16b, CT4.16b, CT4.16b
eor CT5.16b, CT5.16b, CT5.16b
eor CT6.16b, CT6.16b, CT6.16b
eor CT7.16b, CT7.16b, CT7.16b // fix: CT7/CT8 were previously left holding
eor CT8.16b, CT8.16b, CT8.16b // keystream blocks from the 7/8-block paths
eor RDK0.16b, RDK0.16b, RDK0.16b
eor RDK1.16b, RDK1.16b, RDK1.16b
AARCH64_AUTIASP
ret
.size CRYPT_AES_CFB_Decrypt, .-CRYPT_AES_CFB_Decrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_cfb_armv8.S | Unix Assembly | unknown | 7,951 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_CTR)
#include "crypt_arm.h"
#include "crypt_aes_macro_armv8.s"
.file "crypt_aes_ctr_armv8.S"
.text
.arch armv8-a+crypto
.align 5
KEY .req x0
IN .req x1
OUT .req x2
LEN .req x3
IV .req x4
LTMP .req x12
CTMP .req v27
BLK0 .req v0
BLK1 .req v1
BLK2 .req v2
BLK3 .req v3
BLK4 .req v4
BLK5 .req v5
BLK6 .req v6
BLK7 .req v7
CTR0 .req v19
CTR1 .req v20
CTR2 .req v21
CTR3 .req v22
CTR4 .req v23
CTR5 .req v24
CTR6 .req v25
CTR7 .req v26
RDK0 .req v17
RDK1 .req v18
ROUNDS .req w6
/* ctr + 1: increment the 32-bit counter word and write the big-endian
 * result into the given vector lane.  w11 carries the running counter
 * between invocations (pre-byte-swapped on little-endian builds so only
 * one rev per increment is needed); w9 is scratch. */
.macro ADDCTR ctr
#ifndef HITLS_BIG_ENDIAN
add w11, w11, #1
rev w9, w11
mov \ctr, w9
#else
rev w11, w11
add w11, w11, #1
rev w11, w11
mov \ctr, w11
#endif
.endm
/*
* Vn - V0 ~ V31
* 8bytes - Vn.8B Vn.4H Vn.2S Vn.1D
* 16bytes - Vn.16B Vn.8H Vn.4S Vn.2D
*/
/*
* int32_t CRYPT_AES_CTR_Encrypt(const CRYPT_AES_Key *ctx,
* const uint8_t *in,
* uint8_t *out,
* uint32_t len,
* uint8_t *iv);
*/
/*
 * AES-CTR encrypt, ARMv8 Crypto Extensions.  Dispatches on the remaining
 * length to 1..7-block tail paths or an 8-block main loop.  CTMP keeps the
 * current counter block; w11 keeps its low 32-bit counter in host order.
 * NOTE(review): len is assumed to be a multiple of 16 (full blocks) —
 * confirm against the C caller.
 */
.globl CRYPT_AES_CTR_Encrypt
.type CRYPT_AES_CTR_Encrypt, %function
CRYPT_AES_CTR_Encrypt:
AARCH64_PACIASP
ld1 {CTR0.16b}, [IV] // Reads the IV.
mov CTMP.16b, CTR0.16b
mov w11, CTR0.s[3] // Low word of the counter block.
#ifndef HITLS_BIG_ENDIAN
rev w11, w11 // Convert to host order for arithmetic.
#endif
mov LTMP, LEN
/* Dispatch on remaining length: >=128 -> 8-block loop, else tail paths. */
.Lctr_aesenc_start:
cmp LTMP, #64
b.ge .Lctr_enc_above_equal_4_blks
cmp LTMP, #32
b.ge .Lctr_enc_above_equal_2_blks
cmp LTMP, #0
b.eq .Lctr_len_zero
b .Lctr_enc_proc_1_blk
.Lctr_enc_above_equal_2_blks:
cmp LTMP, #48
b.lt .Lctr_enc_proc_2_blks
b .Lctr_enc_proc_3_blks
.Lctr_enc_above_equal_4_blks:
cmp LTMP, #96
b.ge .Lctr_enc_above_equal_6_blks
cmp LTMP, #80
b.lt .Lctr_enc_proc_4_blks
b .Lctr_enc_proc_5_blks
.Lctr_enc_above_equal_6_blks:
cmp LTMP, #112
b.lt .Lctr_enc_proc_6_blks
cmp LTMP, #128
b.lt .Lctr_enc_proc_7_blks
.Lctr_enc_proc_8_blks:
/* When the length is greater than or equal to 128, eight blocks loop is used. */
.Lctr_aesenc_8_blks_loop:
/* Calculate eight CTRs: CTR0 already holds the base, CTR1..CTR7 get base+1..+7. */
mov CTR1.16b, CTMP.16b
mov CTR2.16b, CTMP.16b
mov CTR3.16b, CTMP.16b
mov CTR4.16b, CTMP.16b
mov CTR5.16b, CTMP.16b
mov CTR6.16b, CTMP.16b
mov CTR7.16b, CTMP.16b
ADDCTR CTR1.s[3]
ADDCTR CTR2.s[3]
ADDCTR CTR3.s[3]
ADDCTR CTR4.s[3]
ADDCTR CTR5.s[3]
ADDCTR CTR6.s[3]
ADDCTR CTR7.s[3]
mov x14, KEY // Prevent the key pointer from being changed by the macro.
AES_ENC_8_BLKS x14 CTR0.16b CTR1.16b CTR2.16b CTR3.16b CTR4.16b \
CTR5.16b CTR6.16b CTR7.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [IN], #64
/* Keystream XOR plaintext -> ciphertext. */
eor BLK0.16b, BLK0.16b, CTR0.16b
eor BLK1.16b, BLK1.16b, CTR1.16b
eor BLK2.16b, BLK2.16b, CTR2.16b
eor BLK3.16b, BLK3.16b, CTR3.16b
eor BLK4.16b, BLK4.16b, CTR4.16b
eor BLK5.16b, BLK5.16b, CTR5.16b
eor BLK6.16b, BLK6.16b, CTR6.16b
eor BLK7.16b, BLK7.16b, CTR7.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [OUT], #64
sub LTMP, LTMP, #128
cmp LTMP, #0
b.eq .Lctr_aesenc_finish
ADDCTR CTMP.s[3] // Advance base counter (w11 already at base+7, so +1 = +8).
mov CTR0.16b, CTMP.16b
cmp LTMP, #128
b.lt .Lctr_aesenc_start // Tail shorter than 8 blocks: re-dispatch.
b .Lctr_aesenc_8_blks_loop
.Lctr_enc_proc_1_blk:
AES_ENC_1_BLK KEY CTR0.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
ld1 {BLK0.16b}, [IN]
eor BLK0.16b, CTR0.16b, BLK0.16b
st1 {BLK0.16b}, [OUT]
b .Lctr_aesenc_finish
.Lctr_enc_proc_2_blks:
mov CTR1.16b, CTMP.16b
ADDCTR CTR1.s[3]
AES_ENC_2_BLKS KEY CTR0.16b CTR1.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
ld1 {BLK0.16b, BLK1.16b}, [IN]
eor BLK0.16b, CTR0.16b, BLK0.16b
eor BLK1.16b, CTR1.16b, BLK1.16b
st1 {BLK0.16b, BLK1.16b}, [OUT]
b .Lctr_aesenc_finish
.Lctr_enc_proc_3_blks:
mov CTR1.16b, CTMP.16b
mov CTR2.16b, CTMP.16b
ADDCTR CTR1.s[3]
ADDCTR CTR2.s[3]
AES_ENC_3_BLKS KEY CTR0.16b CTR1.16b CTR2.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
ld1 {BLK0.16b, BLK1.16b, BLK2.16b}, [IN]
eor BLK0.16b, BLK0.16b, CTR0.16b
eor BLK1.16b, BLK1.16b, CTR1.16b
eor BLK2.16b, BLK2.16b, CTR2.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b}, [OUT]
b .Lctr_aesenc_finish
.Lctr_enc_proc_4_blks:
mov CTR1.16b, CTMP.16b
mov CTR2.16b, CTMP.16b
mov CTR3.16b, CTMP.16b
ADDCTR CTR1.s[3]
ADDCTR CTR2.s[3]
ADDCTR CTR3.s[3]
AES_ENC_4_BLKS KEY CTR0.16b CTR1.16b CTR2.16b CTR3.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN]
eor BLK0.16b, BLK0.16b, CTR0.16b
eor BLK1.16b, BLK1.16b, CTR1.16b
eor BLK2.16b, BLK2.16b, CTR2.16b
eor BLK3.16b, BLK3.16b, CTR3.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT]
b .Lctr_aesenc_finish
.Lctr_enc_proc_5_blks:
mov CTR1.16b, CTMP.16b
mov CTR2.16b, CTMP.16b
mov CTR3.16b, CTMP.16b
mov CTR4.16b, CTMP.16b
ADDCTR CTR1.s[3]
ADDCTR CTR2.s[3]
ADDCTR CTR3.s[3]
ADDCTR CTR4.s[3]
AES_ENC_5_BLKS KEY CTR0.16b CTR1.16b CTR2.16b CTR3.16b CTR4.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b}, [IN]
eor BLK0.16b, BLK0.16b, CTR0.16b
eor BLK1.16b, BLK1.16b, CTR1.16b
eor BLK2.16b, BLK2.16b, CTR2.16b
eor BLK3.16b, BLK3.16b, CTR3.16b
eor BLK4.16b, BLK4.16b, CTR4.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b}, [OUT]
b .Lctr_aesenc_finish
.Lctr_enc_proc_6_blks:
mov CTR1.16b, CTMP.16b
mov CTR2.16b, CTMP.16b
mov CTR3.16b, CTMP.16b
mov CTR4.16b, CTMP.16b
mov CTR5.16b, CTMP.16b
ADDCTR CTR1.s[3]
ADDCTR CTR2.s[3]
ADDCTR CTR3.s[3]
ADDCTR CTR4.s[3]
ADDCTR CTR5.s[3]
AES_ENC_6_BLKS KEY CTR0.16b CTR1.16b CTR2.16b CTR3.16b CTR4.16b \
CTR5.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b}, [IN]
eor BLK0.16b, BLK0.16b, CTR0.16b
eor BLK1.16b, BLK1.16b, CTR1.16b
eor BLK2.16b, BLK2.16b, CTR2.16b
eor BLK3.16b, BLK3.16b, CTR3.16b
eor BLK4.16b, BLK4.16b, CTR4.16b
eor BLK5.16b, BLK5.16b, CTR5.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b}, [OUT]
b .Lctr_aesenc_finish
.Lctr_enc_proc_7_blks:
mov CTR1.16b, CTMP.16b
mov CTR2.16b, CTMP.16b
mov CTR3.16b, CTMP.16b
mov CTR4.16b, CTMP.16b
mov CTR5.16b, CTMP.16b
mov CTR6.16b, CTMP.16b
ADDCTR CTR1.s[3]
ADDCTR CTR2.s[3]
ADDCTR CTR3.s[3]
ADDCTR CTR4.s[3]
ADDCTR CTR5.s[3]
ADDCTR CTR6.s[3]
AES_ENC_7_BLKS KEY CTR0.16b CTR1.16b CTR2.16b CTR3.16b CTR4.16b \
CTR5.16b CTR6.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b}, [IN]
eor BLK0.16b, BLK0.16b, CTR0.16b
eor BLK1.16b, BLK1.16b, CTR1.16b
eor BLK2.16b, BLK2.16b, CTR2.16b
eor BLK3.16b, BLK3.16b, CTR3.16b
eor BLK4.16b, BLK4.16b, CTR4.16b
eor BLK5.16b, BLK5.16b, CTR5.16b
eor BLK6.16b, BLK6.16b, CTR6.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b}, [OUT]
.Lctr_aesenc_finish:
ADDCTR CTMP.s[3] // Fill CTR0 for the next round.
st1 {CTMP.16b}, [IV] // Write the advanced counter back to the caller's IV.
.Lctr_len_zero:
mov x0, #0 // Return success.
/* Scrub keystream and round-key registers before returning. */
eor CTR0.16b, CTR0.16b, CTR0.16b
eor CTR1.16b, CTR1.16b, CTR1.16b
eor CTR2.16b, CTR2.16b, CTR2.16b
eor CTR3.16b, CTR3.16b, CTR3.16b
eor CTR4.16b, CTR4.16b, CTR4.16b
eor CTR5.16b, CTR5.16b, CTR5.16b
eor CTR6.16b, CTR6.16b, CTR6.16b
eor CTR7.16b, CTR7.16b, CTR7.16b
eor RDK0.16b, RDK0.16b, RDK0.16b
eor RDK1.16b, RDK1.16b, RDK1.16b
AARCH64_AUTIASP
ret
.size CRYPT_AES_CTR_Encrypt, .-CRYPT_AES_CTR_Encrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_ctr_armv8.S | Unix Assembly | unknown | 9,199 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_CTR)
.file "crypt_aes_ctr_x86_64.S"
.text
.set KEY, %rdi
.set INPUT, %rsi
.set OUTPUT, %rdx
.set LEN, %ecx
.set CTR_IV, %r8
.set RDK, %xmm0
.set RDK2, %xmm1
.set KTMP, %r13
.set ROUNDS, %eax
.set RET, %eax
.set IV0, %xmm2
.set IV1, %xmm3
.set IV2, %xmm4
.set IV3, %xmm5
.set IV4, %xmm6
.set IV5, %xmm7
.set IV6, %xmm8
.set IV7, %xmm9
.set BLK0, %xmm10
.set BLK1, %xmm11
.set BLK2, %xmm12
.set BLK3, %xmm13
.set BLK4, %xmm14
.set BLK5, %xmm15
/**
 * Macro description: apply one AES encryption round (aesenc) with the same
 * round key to eight blocks.
 * Input register:
 *   key: round key (xmm register).
 *   block0-7: state blocks being encrypted.
 * Modified registers: block0-7.
 * Output register:
 *   block0-7: state after one round of encryption.
 */
.macro ONE_ENC key block0 block1 block2 block3 block4 block5 block6 block7
aesenc \key, \block0
aesenc \key, \block1
aesenc \key, \block2
aesenc \key, \block3
aesenc \key, \block4
aesenc \key, \block5
aesenc \key, \block6
aesenc \key, \block7
.endm
/**
 * Macro description: compute the next counter value (ctr + offset),
 * byte-swap it, XOR it with the first round-key word, and store it into
 * the counter block staged on the stack (low 4 bytes of a 16-byte slot).
 * Input register:
 *   ctr32: current 32-bit counter (host order).
 *   offset: counter increment.
 *   temp: 32-bit scratch register.
 *   key32: low 32 bits of round key 0.
 *   addrOffset: byte offset of the target stack slot.
 *   addr: base address of the staging area (stack pointer).
 * Modified register: temp.
 */
.macro XOR_KEY ctr32 offset temp key32 addrOffset addr
leal \offset(\ctr32), \temp // ctr + offset, then byte-swap and pre-XOR key word.
bswapl \temp
xorl \key32, \temp
movl \temp, \addrOffset+12(\addr)
.endm
/**
 * Macro description: load one round key, apply one aesenc round to IV0-IV7,
 * and — interleaved with the crypto work to hide latency — prepare the next
 * batch's counter word (ctr + offset2, byte-swapped, XORed with the round-0
 * key word) and store it into the stack staging area.
 * Input register:
 *   key: pointer to the key schedule.
 *   offset: byte offset of this round's key.
 *   temp: xmm scratch for the round key.
 *   ctr32: current 32-bit counter (host order).
 *   offset2: counter increment for the staged block.
 *   temp2: 32-bit scratch for the counter word.
 *   key32: low 32 bits of round key 0.
 *   addrOffset: byte offset of the target stack slot.
 *   addr: base address of the staging area.
 * Modified registers: temp, temp2, IV0-IV7.
 * Output register:
 *   IV0-7: state after one round of encryption.
 */
.macro ONE_ENC_XOR_KEY key offset temp ctr32 offset2 temp2 key32 addrOffset addr
vmovdqu \offset(\key), \temp
aesenc \temp, IV0
leal \offset2(\ctr32), \temp2 // Stage next counter word while aesenc pipes run.
aesenc \temp, IV1
bswapl \temp2
aesenc \temp, IV2
aesenc \temp, IV3
xorl \key32, \temp2
aesenc \temp, IV4
aesenc \temp, IV5
movl \temp2, \addrOffset+12(\addr)
aesenc \temp, IV6
aesenc \temp, IV7
.endm
/**
 * Macro description: advance the input and output pointers by offset bytes
 * and subtract offset from the remaining length.
 * Input register:
 *   input: pointer to the input buffer.
 *   output: pointer to the output buffer.
 *   len: remaining data length (32-bit).
 *   offset: number of bytes consumed.
 * Modified registers: input, output, len.
 * Output register:
 *   input, output, len: updated values.
 */
.macro UPDATE_DATA input output len offset
leaq \offset(\input), \input
leaq \offset(\output), \output
subl $\offset, \len
.endm
/**
* Function description:Sets the AES encrypted assembly acceleration API, ctr mode.
* Function prototype:int32_t CRYPT_AES_CTR_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out,
* uint32_t len, uint8_t *iv);
* Input register:
* rdi:Pointer to the input key structure.
* rsi:Points to the 128-bit input data.
* rdx:Points to the 128-bit output data.
* rcx:Length of the data block, that is, 16 bytes.
* r8: 16-byte initialization vector.
* Change register:xmm1, xmm3, xmm4, xmm5, xmm6, xmm10, xmm11, xmm12, xmm13.
* Output register:rdx, r8.
*/
/*
 * AES-CTR encrypt, x86-64 AES-NI.  Strategy: first process the residual
 * (len/16 mod 8) blocks via the 1..7-block paths, then fall into an
 * 8-block software-pipelined loop that stages the next batch of counter
 * blocks on the stack while the current batch is being encrypted.
 * Counter blocks are pre-XORed with round key 0, so the loop starts at
 * round 1.  NOTE(review): len is assumed to be a multiple of 16 — confirm
 * against the C caller.
 */
.globl CRYPT_AES_CTR_Encrypt
.type CRYPT_AES_CTR_Encrypt, @function
CRYPT_AES_CTR_Encrypt:
.cfi_startproc
pushq %r12
pushq %r13
pushq %r14
pushq %r15
mov %rsp, %r12 // Save sp so the aligned frame can be unwound.
subq $128, %rsp // Declare for 128-byte stack space.
andq $-16, %rsp // 16-byte align for vmovdqa.
vmovdqu (KEY), RDK
vpxor (CTR_IV), RDK, IV0 // IV ^ round key 0: common prefix of every counter block.
vmovdqa IV0, 0(%rsp)
vmovdqa IV0, 16(%rsp)
vmovdqa IV0, 32(%rsp)
vmovdqa IV0, 48(%rsp)
vmovdqa IV0, 64(%rsp)
vmovdqa IV0, 80(%rsp)
vmovdqa IV0, 96(%rsp)
vmovdqa IV0, 112(%rsp)
movl 12(CTR_IV), %r11d // Read 32-bit ctr.
movl 12(KEY), %r9d // Read 32-bit key.
bswap %r11d // Counter kept in host order in r11d.
mov LEN, %r14d
shr $4, %r14d
and $7, %r14d // Residual block count = (len / 16) mod 8.
cmp $1, %r14d
je .Lctr_enc_proc_1_blk
cmp $2, %r14d
je .Lctr_enc_proc_2_blk
cmp $3, %r14d
je .Lctr_enc_proc_3_blk
cmp $4, %r14d
je .Lctr_enc_proc_4_blk
cmp $5, %r14d
je .Lctr_enc_proc_5_blk
cmp $6, %r14d
je .Lctr_enc_proc_6_blk
cmp $7, %r14d
je .Lctr_enc_proc_7_blk
.Lctr_enc_proc_8_blk:
cmp $0, LEN
je .Lctr_aesenc_finish
/* Stage eight counter words (ctr+0..ctr+7, byte-swapped, ^key0) on the stack. */
leal 0(%r11d), %r15d
leal 1(%r11d), %r10d
bswapl %r15d
bswapl %r10d
xorl %r9d, %r15d
xorl %r9d, %r10d
leal 2(%r11d), %r14d
movl %r15d, 12(%rsp)
bswapl %r14d
movl %r10d, 16+12(%rsp)
xorl %r9d, %r14d
leal 3(%r11d), %r15d
leal 4(%r11d), %r10d
bswapl %r15d
bswapl %r10d
movl %r14d, 32+12(%rsp)
xorl %r9d, %r15d
xorl %r9d, %r10d
movl %r15d, 48+12(%rsp)
leal 5(%r11d), %r14d
bswapl %r14d
movl %r10d, 64+12(%rsp)
xorl %r9d, %r14d
leal 6(%r11d), %r15d
leal 7(%r11d), %r10d
movl %r14d, 80+12(%rsp)
bswapl %r15d
bswapl %r10d
xorl %r9d, %r15d
xorl %r9d, %r10d
movl %r15d, 96+12(%rsp)
movl %r10d, 112+12(%rsp)
vmovdqa (%rsp), IV0
vmovdqa 16(%rsp), IV1
vmovdqa 32(%rsp), IV2
vmovdqa 48(%rsp), IV3
vmovdqa 64(%rsp), IV4
vmovdqa 80(%rsp), IV5
vmovdqa 96(%rsp), IV6
vmovdqa 112(%rsp), IV7
.align 16
.Lctr_aesenc_8_blks_loop:
addl $8, %r11d // ctr+8
movl 240(KEY), ROUNDS
/* Rounds 1-8 also restage the next batch's eight counter words. */
ONE_ENC_XOR_KEY KEY, 16, RDK2, %r11d, 0, %r10d, %r9d, 0, %rsp // Round 1 encryption
ONE_ENC_XOR_KEY KEY, 32, RDK2, %r11d, 1, %r10d, %r9d, 16, %rsp // Round 2 encryption
ONE_ENC_XOR_KEY KEY, 48, RDK2, %r11d, 2, %r10d, %r9d, 32, %rsp // Round 3 encryption
ONE_ENC_XOR_KEY KEY, 64, RDK2, %r11d, 3, %r10d, %r9d, 48, %rsp // Round 4 encryption
ONE_ENC_XOR_KEY KEY, 80, RDK2, %r11d, 4, %r10d, %r9d, 64, %rsp // Round 5 encryption
ONE_ENC_XOR_KEY KEY, 96, RDK2, %r11d, 5, %r10d, %r9d, 80, %rsp // Round 6 encryption
ONE_ENC_XOR_KEY KEY, 112, RDK2, %r11d, 6, %r10d, %r9d, 96, %rsp // Round 7 encryption
ONE_ENC_XOR_KEY KEY, 128, RDK2, %r11d, 7, %r10d, %r9d, 112, %rsp // Round 8 encryption
vmovdqu 144(KEY), RDK // Round 9 key Load
vmovdqu 160(KEY), RDK2 // Round 10 key Load
cmp $12, ROUNDS // AES-128: 10 rounds, jump straight to the tail.
jb .Lctr_aesenc_8_blks_enc_last
ONE_ENC RDK, IV0, IV1, IV2, IV3, IV4, IV5, IV6, IV7 // Round 9 encryption
vmovdqu 176(KEY), RDK // Round 11 key Load
ONE_ENC RDK2, IV0, IV1, IV2, IV3, IV4, IV5, IV6, IV7 // Round 10 encryption
vmovdqu 192(KEY), RDK2 // Round 12 key Load
je .Lctr_aesenc_8_blks_enc_last // AES-192 done with mid rounds.
ONE_ENC RDK, IV0, IV1, IV2, IV3, IV4, IV5, IV6, IV7 // Round 11 encryption
vmovdqu 208(KEY), RDK // Round 13 key Load
ONE_ENC RDK2, IV0, IV1, IV2, IV3, IV4, IV5, IV6, IV7 // Round 12 encryption
vmovdqu 224(KEY), RDK2 // Round 14 key Load
.align 16
.Lctr_aesenc_8_blks_enc_last:
/* aesenclast(x, k^p) == aesenclast(x, k) ^ p: fold the plaintext XOR into
 * the last round for the first four blocks. */
vpxor (INPUT), RDK2, BLK0 // Last round Key ^ Plaintext.
vpxor 16(INPUT), RDK2, BLK1
vpxor 32(INPUT), RDK2, BLK2
vpxor 48(INPUT), RDK2, BLK3
ONE_ENC RDK, IV0, IV1, IV2, IV3, IV4, IV5, IV6, IV7 // Penultimate round.
aesenclast BLK0, IV0 // Last round of encryption.
aesenclast BLK1, IV1
aesenclast BLK2, IV2
aesenclast BLK3, IV3
aesenclast RDK2, IV4
aesenclast RDK2, IV5
aesenclast RDK2, IV6
aesenclast RDK2, IV7
vmovdqu IV0, (OUTPUT) // The first four ciphertexts are stored in out.
vmovdqu IV1, 16(OUTPUT)
vmovdqu IV2, 32(OUTPUT)
vmovdqu IV3, 48(OUTPUT)
vpxor 64(INPUT), IV4, BLK0 // Last Round Key ^ Plaintext.
vpxor 80(INPUT), IV5, BLK1
vpxor 96(INPUT), IV6, BLK2
vpxor 112(INPUT), IV7, BLK3
vmovdqu BLK0, 64(OUTPUT)
vmovdqu BLK1, 80(OUTPUT)
vmovdqu BLK2, 96(OUTPUT) // The last four ciphertexts are stored in out.
vmovdqu BLK3, 112(OUTPUT)
vmovdqa (%rsp), IV0 // Reads the next round of ctr from the stack.
vmovdqa 16(%rsp), IV1
vmovdqa 32(%rsp), IV2
vmovdqa 48(%rsp), IV3
vmovdqa 64(%rsp), IV4
vmovdqa 80(%rsp), IV5
vmovdqa 96(%rsp), IV6
vmovdqa 112(%rsp), IV7
UPDATE_DATA INPUT, OUTPUT, LEN, 128
cmpl $0, LEN
jbe .Lctr_aesenc_finish
jmp .Lctr_aesenc_8_blks_loop
.Lctr_enc_proc_1_blk:
movl 240(KEY), ROUNDS
movq KEY, KTMP
decl ROUNDS
.align 16
.Laesenc_loop:
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenc RDK, IV0
decl ROUNDS
jnz .Laesenc_loop // Loop the loop until the ROUNDS is 0.
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenclast RDK, IV0
addl $1, %r11d // Update ctr32.
vpxor (INPUT), IV0, BLK0
vmovdqu BLK0, (OUTPUT) // Ciphertext stored in out.
UPDATE_DATA INPUT, OUTPUT, LEN, 16
jmp .Lctr_enc_proc_8_blk
.Lctr_enc_proc_2_blk:
movl 240(KEY), ROUNDS
movq KEY, KTMP
decl ROUNDS
XOR_KEY %r11d, 1, %r10d, %r9d, 16, %rsp
vmovdqa 16(%rsp), IV1
.align 16
.Laesenc_2_blks_loop:
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenc RDK, IV0
aesenc RDK, IV1
decl ROUNDS
jnz .Laesenc_2_blks_loop
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenclast RDK, IV0
aesenclast RDK, IV1
vpxor (INPUT), IV0, BLK0
vpxor 16(INPUT), IV1, BLK1
vmovdqu BLK0, (OUTPUT)
vmovdqu BLK1, 16(OUTPUT)
addl $2, %r11d
UPDATE_DATA INPUT, OUTPUT, LEN, 32
jmp .Lctr_enc_proc_8_blk
.Lctr_enc_proc_3_blk:
movl 240(KEY), ROUNDS
movq KEY, KTMP
decl ROUNDS
XOR_KEY %r11d, 1, %r10d, %r9d, 16, %rsp
XOR_KEY %r11d, 2, %r10d, %r9d, 32, %rsp
vmovdqa 16(%rsp), IV1
vmovdqa 32(%rsp), IV2
.align 16
.Laesenc_3_blks_loop:
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenc RDK, IV0
aesenc RDK, IV1
aesenc RDK, IV2
decl ROUNDS
jnz .Laesenc_3_blks_loop
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenclast RDK, IV0
aesenclast RDK, IV1
aesenclast RDK, IV2
vpxor (INPUT), IV0, BLK0
vpxor 16(INPUT), IV1, BLK1
vpxor 32(INPUT), IV2, BLK2
vmovdqu BLK0, (OUTPUT)
vmovdqu BLK1, 16(OUTPUT)
vmovdqu BLK2, 32(OUTPUT)
addl $3, %r11d
UPDATE_DATA INPUT, OUTPUT, LEN, 48
jmp .Lctr_enc_proc_8_blk
.Lctr_enc_proc_4_blk:
movl 240(KEY), ROUNDS
movq KEY, KTMP
decl ROUNDS
XOR_KEY %r11d, 1, %r10d, %r9d, 16, %rsp
XOR_KEY %r11d, 2, %r10d, %r9d, 32, %rsp
XOR_KEY %r11d, 3, %r10d, %r9d, 48, %rsp
vmovdqa 16(%rsp), IV1
vmovdqa 32(%rsp), IV2
vmovdqa 48(%rsp), IV3
.align 16
.Laesenc_4_blks_loop:
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenc RDK, IV0
aesenc RDK, IV1
aesenc RDK, IV2
aesenc RDK, IV3
decl ROUNDS
jnz .Laesenc_4_blks_loop
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenclast RDK, IV0
aesenclast RDK, IV1
aesenclast RDK, IV2
aesenclast RDK, IV3
vpxor (INPUT), IV0, BLK0
vpxor 16(INPUT), IV1, BLK1
vpxor 32(INPUT), IV2, BLK2
vpxor 48(INPUT), IV3, BLK3
vmovdqu BLK0, (OUTPUT)
vmovdqu BLK1, 16(OUTPUT)
vmovdqu BLK2, 32(OUTPUT)
vmovdqu BLK3, 48(OUTPUT)
addl $4, %r11d
UPDATE_DATA INPUT, OUTPUT, LEN, 64
jmp .Lctr_enc_proc_8_blk
.Lctr_enc_proc_5_blk:
movl 240(KEY), ROUNDS
movq KEY, KTMP
decl ROUNDS
XOR_KEY %r11d, 1, %r10d, %r9d, 16, %rsp
XOR_KEY %r11d, 2, %r10d, %r9d, 32, %rsp
XOR_KEY %r11d, 3, %r10d, %r9d, 48, %rsp
XOR_KEY %r11d, 4, %r10d, %r9d, 64, %rsp
vmovdqa 16(%rsp), IV1
vmovdqa 32(%rsp), IV2
vmovdqa 48(%rsp), IV3
vmovdqa 64(%rsp), IV4
.align 16
.Laesenc_5_blks_loop:
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenc RDK, IV0
aesenc RDK, IV1
aesenc RDK, IV2
aesenc RDK, IV3
aesenc RDK, IV4
decl ROUNDS
jnz .Laesenc_5_blks_loop
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenclast RDK, IV0
aesenclast RDK, IV1
aesenclast RDK, IV2
aesenclast RDK, IV3
aesenclast RDK, IV4
vpxor (INPUT), IV0, BLK0
vpxor 16(INPUT), IV1, BLK1
vpxor 32(INPUT), IV2, BLK2
vpxor 48(INPUT), IV3, BLK3
vpxor 64(INPUT), IV4, BLK4
vmovdqu BLK0, (OUTPUT)
vmovdqu BLK1, 16(OUTPUT)
vmovdqu BLK2, 32(OUTPUT)
vmovdqu BLK3, 48(OUTPUT)
vmovdqu BLK4, 64(OUTPUT)
addl $5, %r11d
UPDATE_DATA INPUT, OUTPUT, LEN, 80
jmp .Lctr_enc_proc_8_blk
.Lctr_enc_proc_6_blk:
movl 240(KEY), ROUNDS
movq KEY, KTMP
decl ROUNDS
XOR_KEY %r11d, 1, %r10d, %r9d, 16, %rsp
XOR_KEY %r11d, 2, %r10d, %r9d, 32, %rsp
XOR_KEY %r11d, 3, %r10d, %r9d, 48, %rsp
XOR_KEY %r11d, 4, %r10d, %r9d, 64, %rsp
XOR_KEY %r11d, 5, %r10d, %r9d, 80, %rsp
vmovdqa 16(%rsp), IV1
vmovdqa 32(%rsp), IV2
vmovdqa 48(%rsp), IV3
vmovdqa 64(%rsp), IV4
vmovdqa 80(%rsp), IV5
.align 16
.Laesenc_6_blks_loop:
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenc RDK, IV0
aesenc RDK, IV1
aesenc RDK, IV2
aesenc RDK, IV3
aesenc RDK, IV4
aesenc RDK, IV5
decl ROUNDS
jnz .Laesenc_6_blks_loop
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenclast RDK, IV0
aesenclast RDK, IV1
aesenclast RDK, IV2
aesenclast RDK, IV3
aesenclast RDK, IV4
aesenclast RDK, IV5
vpxor (INPUT), IV0, BLK0
vpxor 16(INPUT), IV1, BLK1
vpxor 32(INPUT), IV2, BLK2
vpxor 48(INPUT), IV3, BLK3
vpxor 64(INPUT), IV4, BLK4
vpxor 80(INPUT), IV5, BLK5
vmovdqu BLK0, (OUTPUT)
vmovdqu BLK1, 16(OUTPUT)
vmovdqu BLK2, 32(OUTPUT)
vmovdqu BLK3, 48(OUTPUT)
vmovdqu BLK4, 64(OUTPUT)
vmovdqu BLK5, 80(OUTPUT)
addl $6, %r11d
UPDATE_DATA INPUT, OUTPUT, LEN, 96
jmp .Lctr_enc_proc_8_blk
.Lctr_enc_proc_7_blk:
movl 240(KEY), ROUNDS
movq KEY, KTMP
decl ROUNDS
XOR_KEY %r11d, 1, %r10d, %r9d, 16, %rsp
XOR_KEY %r11d, 2, %r10d, %r9d, 32, %rsp
XOR_KEY %r11d, 3, %r10d, %r9d, 48, %rsp
XOR_KEY %r11d, 4, %r10d, %r9d, 64, %rsp
XOR_KEY %r11d, 5, %r10d, %r9d, 80, %rsp
XOR_KEY %r11d, 6, %r10d, %r9d, 96, %rsp
vmovdqa 16(%rsp), IV1
vmovdqa 32(%rsp), IV2
vmovdqa 48(%rsp), IV3
vmovdqa 64(%rsp), IV4
vmovdqa 80(%rsp), IV5
vmovdqa 96(%rsp), IV6
.align 16
.Laesenc_7_blks_loop:
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenc RDK, IV0
aesenc RDK, IV1
aesenc RDK, IV2
aesenc RDK, IV3
aesenc RDK, IV4
aesenc RDK, IV5
aesenc RDK, IV6
decl ROUNDS
jnz .Laesenc_7_blks_loop
leaq 16(KTMP), KTMP
vmovdqu (KTMP), RDK
aesenclast RDK, IV0
aesenclast RDK, IV1
aesenclast RDK, IV2
aesenclast RDK, IV3
aesenclast RDK, IV4
aesenclast RDK, IV5
aesenclast RDK, IV6
vpxor (INPUT), IV0, BLK0
vpxor 16(INPUT), IV1, BLK1
vpxor 32(INPUT), IV2, BLK2
vpxor 48(INPUT), IV3, BLK3
vmovdqu BLK0, (OUTPUT)
vmovdqu BLK1, 16(OUTPUT)
vmovdqu BLK2, 32(OUTPUT)
vmovdqu BLK3, 48(OUTPUT)
vpxor 64(INPUT), IV4, BLK0
vpxor 80(INPUT), IV5, BLK1
vpxor 96(INPUT), IV6, BLK2
vmovdqu BLK0, 64(OUTPUT)
vmovdqu BLK1, 80(OUTPUT)
vmovdqu BLK2, 96(OUTPUT)
addl $7, %r11d
UPDATE_DATA INPUT, OUTPUT, LEN, 112
jmp .Lctr_enc_proc_8_blk
.Lctr_aesenc_finish:
bswap %r11d
movl %r11d, 12(CTR_IV) // Write the advanced counter back to the caller's IV.
/* Scrub keystream registers and the stack staging area.
 * NOTE(review): RDK2 (xmm1) is not cleared here and may still hold the
 * last round key — confirm whether that is intentional. */
vpxor IV0, IV0, IV0
vpxor IV1, IV1, IV1
vpxor IV2, IV2, IV2
vpxor IV3, IV3, IV3
vpxor IV4, IV4, IV4
vpxor IV5, IV5, IV5
vpxor IV6, IV6, IV6
vpxor IV7, IV7, IV7
vpxor RDK, RDK, RDK
vmovdqa IV0, 0(%rsp)
vmovdqa IV0, 16(%rsp)
vmovdqa IV0, 32(%rsp)
vmovdqa IV0, 48(%rsp)
vmovdqa IV0, 64(%rsp)
vmovdqa IV0, 80(%rsp)
vmovdqa IV0, 96(%rsp)
vmovdqa IV0, 112(%rsp)
movq %r12, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
movl $0, RET // Return success.
ret
.cfi_endproc
.size CRYPT_AES_CTR_Encrypt, .-CRYPT_AES_CTR_Encrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_ctr_x86_64.S | Unix Assembly | unknown | 18,679 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_ECB)
#include "crypt_arm.h"
#include "crypt_aes_macro_armv8.s"
.file "crypt_aes_ecb_armv8.S"
.text
.arch armv8-a+crypto
KEY .req x0
IN .req x1
OUT .req x2
LEN .req x3
KTMP .req x4
LTMP .req x9
ROUNDS .req w6
BLK0 .req v0
BLK1 .req v1
BLK2 .req v2
BLK3 .req v3
BLK4 .req v4
BLK5 .req v5
BLK6 .req v6
BLK7 .req v7
RDK0 .req v17
RDK1 .req v18
/*
* Vn - V0 ~ V31
* 8bytes - Vn.8B Vn.4H Vn.2S Vn.1D
* 16bytes - Vn.16B Vn.8H Vn.4S Vn.2D
*
* In Return-oriented programming (ROP) and Jump-oriented programming (JOP), we explored features
* that Arm introduced to the Arm architecture to mitigate against JOP-style and ROP-style attacks.
* ...
* Whether the combined or NOP-compatible instructions are generated depends on the architecture
* version that the code is built for. When building for Armv8.3-A, or later, the compiler will use
* the combined operations. When building for Armv8.2-A, or earlier, it will use the NOP compatible
* instructions.
* (https://developer.arm.com/documentation/102433/0100/Applying-these-techniques-to-real-code?lang=en)
*
* The paciasp and autiasp instructions are used for function pointer authentication. The pointer
* authentication feature is added in armv8.3 and is supported only by AArch64.
* The addition of pointer authentication features is described in Section A2.6.1 of
* DDI0487H_a_a-profile_architecture_reference_manual.pdf.
*/
/**
* Function description: Sets the AES encryption assembly acceleration interface in ECB mode.
* int32_t CRYPT_AES_ECB_Encrypt(const CRYPT_AES_Key *ctx,
* const uint8_t *in,
* uint8_t *out,
* uint32_t len);
* Input register:
* x0: Pointer to the input key structure.
* x1: Points to the 128-bit input data.
* x2: Points to the 128-bit output data.
* x3: Indicates the length of a data block, that is, 16 bytes.
* Change register: x4, x6, x9, v0-v7, v17, v18.
* Output register: x0.
* Function/Macro Call: AES_ENC_8_BLKS, AES_ENC_1_BLK, AES_ENC_2_BLKS, AES_ENC_4_BLKS,
* AES_ENC_5_BLKS, AES_ENC_6_BLKS, AES_ENC_7_BLKS.
*/
/*
 * AES-ECB encrypt, ARMv8 Crypto Extensions: each 16-byte block is encrypted
 * independently.  Dispatches on remaining length to 1..7-block tail paths or
 * an 8-block main loop.  NOTE(review): len is assumed to be a multiple of 16.
 */
.globl CRYPT_AES_ECB_Encrypt
.type CRYPT_AES_ECB_Encrypt, %function
CRYPT_AES_ECB_Encrypt:
AARCH64_PACIASP
mov LTMP, LEN
/* Dispatch on remaining length. */
.Lecb_aesenc_start:
cmp LTMP, #64
b.ge .Lecb_enc_above_equal_4_blks
cmp LTMP, #32
b.ge .Lecb_enc_above_equal_2_blks
cmp LTMP, #0
b.eq .Lecb_aesenc_finish
b .Lecb_enc_proc_1_blk
.Lecb_enc_above_equal_2_blks:
cmp LTMP, #48
b.lt .Lecb_enc_proc_2_blks
b .Lecb_enc_proc_3_blks
.Lecb_enc_above_equal_4_blks:
cmp LTMP, #96
b.ge .Lecb_enc_above_equal_6_blks
cmp LTMP, #80
b.lt .Lecb_enc_proc_4_blks
b .Lecb_enc_proc_5_blks
.Lecb_enc_above_equal_6_blks:
cmp LTMP, #112
b.lt .Lecb_enc_proc_6_blks
cmp LTMP, #128
b.lt .Lecb_enc_proc_7_blks
.Lecb_enc_proc_8_blks:
.Lecb_aesenc_8_blks_loop:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [IN], #64
mov KTMP, KEY // Macro advances the key pointer; work on a copy.
AES_ENC_8_BLKS KTMP BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b \
BLK5.16b BLK6.16b BLK7.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [OUT], #64
sub LTMP, LTMP, #128
cmp LTMP, #128
b.lt .Lecb_aesenc_start // Tail shorter than 8 blocks: re-dispatch.
b .Lecb_aesenc_8_blks_loop
.Lecb_enc_proc_1_blk:
ld1 {BLK0.16b}, [IN]
AES_ENC_1_BLK KEY BLK0.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b}, [OUT]
b .Lecb_aesenc_finish
.Lecb_enc_proc_2_blks:
ld1 {BLK0.16b, BLK1.16b}, [IN]
AES_ENC_2_BLKS KEY BLK0.16b BLK1.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b}, [OUT]
b .Lecb_aesenc_finish
.Lecb_enc_proc_3_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b}, [IN]
AES_ENC_3_BLKS KEY BLK0.16b BLK1.16b BLK2.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b}, [OUT]
b .Lecb_aesenc_finish
.Lecb_enc_proc_4_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN]
AES_ENC_4_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT]
b .Lecb_aesenc_finish
.Lecb_enc_proc_5_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b}, [IN]
AES_ENC_5_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b}, [OUT]
b .Lecb_aesenc_finish
.Lecb_enc_proc_6_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b}, [IN]
AES_ENC_6_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b BLK5.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b}, [OUT]
b .Lecb_aesenc_finish
.Lecb_enc_proc_7_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b}, [IN]
AES_ENC_7_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b BLK5.16b BLK6.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b}, [OUT]
.Lecb_aesenc_finish:
mov x0, #0 // Return success.
eor RDK0.16b, RDK0.16b, RDK0.16b // Scrub round-key registers.
eor RDK1.16b, RDK1.16b, RDK1.16b
AARCH64_AUTIASP
ret
.size CRYPT_AES_ECB_Encrypt, .-CRYPT_AES_ECB_Encrypt
/**
* Function description: Sets the AES decryption and assembly acceleration API in ECB mode.
* int32_t CRYPT_AES_ECB_Decrypt(const CRYPT_AES_Key *ctx,
* const uint8_t *in,
* uint8_t *out,
* uint32_t len);
* Input register:
* x0: Pointer to the input key structure.
* x1: Points to the 128-bit input data.
* x2: Points to the 128-bit output data.
* x3: Indicates the length of a data block, that is, 16 bytes.
* Change register: x4, x6, x9, v0-v7, v17, v18
* Output register: x0
* Function/Macro Call: AES_DEC_8_BLKS, AES_DEC_1_BLK, AES_DEC_2_BLKS, AES_DEC_4_BLKS,
* AES_DEC_5_BLKS, AES_DEC_6_BLKS, AES_DEC_7_BLKS.
*/
/*
 * AES-ECB decrypt, ARMv8 Crypto Extensions.  Mirror of the encrypt path,
 * using the AES_DEC_* macros.  NOTE(review): len is assumed to be a
 * multiple of 16.
 */
.globl CRYPT_AES_ECB_Decrypt
.type CRYPT_AES_ECB_Decrypt, %function
CRYPT_AES_ECB_Decrypt:
AARCH64_PACIASP
mov LTMP, LEN
/* Dispatch on remaining length. */
.Lecb_aesdec_start:
cmp LTMP, #64
b.ge .Lecb_dec_above_equal_4_blks
cmp LTMP, #32
b.ge .Lecb_dec_above_equal_2_blks
cmp LTMP, #0
b.eq .Lecb_aesdec_finish
b .Lecb_dec_proc_1_blk
.Lecb_dec_above_equal_2_blks:
cmp LTMP, #48
b.lt .Lecb_dec_proc_2_blks
b .Lecb_dec_proc_3_blks
.Lecb_dec_above_equal_4_blks:
cmp LTMP, #96
b.ge .Lecb_dec_above_equal_6_blks
cmp LTMP, #80
b.lt .Lecb_dec_proc_4_blks
b .Lecb_dec_proc_5_blks
.Lecb_dec_above_equal_6_blks:
cmp LTMP, #112
b.lt .Lecb_dec_proc_6_blks
cmp LTMP, #128
b.lt .Lecb_dec_proc_7_blks
.Lecb_dec_proc_8_blks:
.Lecb_aesdec_8_blks_loop:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [IN], #64
mov KTMP, KEY // Macro advances the key pointer; work on a copy.
AES_DEC_8_BLKS KTMP BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b \
BLK5.16b BLK6.16b BLK7.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b, BLK7.16b}, [OUT], #64
sub LTMP, LTMP, #128
cmp LTMP, #128
b.lt .Lecb_aesdec_start // Tail shorter than 8 blocks: re-dispatch.
b .Lecb_aesdec_8_blks_loop
.Lecb_dec_proc_1_blk:
ld1 {BLK0.16b}, [IN]
AES_DEC_1_BLK KEY BLK0.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b}, [OUT]
b .Lecb_aesdec_finish
.Lecb_dec_proc_2_blks:
ld1 {BLK0.16b, BLK1.16b}, [IN]
AES_DEC_2_BLKS KEY BLK0.16b BLK1.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b}, [OUT]
b .Lecb_aesdec_finish
.Lecb_dec_proc_3_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b}, [IN]
AES_DEC_3_BLKS KEY BLK0.16b BLK1.16b BLK2.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b}, [OUT]
b .Lecb_aesdec_finish
.Lecb_dec_proc_4_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN]
AES_DEC_4_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT]
b .Lecb_aesdec_finish
.Lecb_dec_proc_5_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b}, [IN]
AES_DEC_5_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b}, [OUT]
b .Lecb_aesdec_finish
.Lecb_dec_proc_6_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b}, [IN]
AES_DEC_6_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b BLK5.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b}, [OUT]
b .Lecb_aesdec_finish
.Lecb_dec_proc_7_blks:
ld1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [IN], #64
ld1 {BLK4.16b, BLK5.16b, BLK6.16b}, [IN]
AES_DEC_7_BLKS KEY BLK0.16b BLK1.16b BLK2.16b BLK3.16b BLK4.16b BLK5.16b BLK6.16b RDK0.4s RDK1.4s RDK0.16b RDK1.16b ROUNDS
st1 {BLK0.16b, BLK1.16b, BLK2.16b, BLK3.16b}, [OUT], #64
st1 {BLK4.16b, BLK5.16b, BLK6.16b}, [OUT]
.Lecb_aesdec_finish:
mov x0, #0 // Return success.
eor RDK0.16b, RDK0.16b, RDK0.16b // Scrub round-key registers.
eor RDK1.16b, RDK1.16b, RDK1.16b
AARCH64_AUTIASP
ret
.size CRYPT_AES_ECB_Decrypt, .-CRYPT_AES_ECB_Decrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_ecb_armv8.S | Unix Assembly | unknown | 10,519 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_ECB)
#include "crypt_aes_macro_x86_64.s"
.file "crypt_aes_ecb_x86_64.S"
.text
/* System V AMD64 ABI argument registers (integer args 1-6). */
.set ARG1, %rdi
.set ARG2, %rsi
.set ARG3, %rdx
.set ARG4, %ecx
.set ARG5, %r8
.set ARG6, %r9
/* Current round key (scratch). */
.set RDK, %xmm3
/* KEY aliases ARG1: the key-schedule pointer arrives in %rdi. */
.set KEY, %rdi
/* Scratch copy of the key pointer, advanced by the round-loop macros. */
.set KTMP, %r9
/* ROUNDS and RET share %eax: the round counter is consumed before
 * the return value is written. */
.set ROUNDS, %eax
.set RET, %eax
/* Data blocks; BLK0-BLK13 plus RDK span xmm0-xmm14. */
.set BLK0, %xmm1
.set BLK1, %xmm4
.set BLK2, %xmm5
.set BLK3, %xmm6
.set BLK4, %xmm10
.set BLK5, %xmm11
.set BLK6, %xmm12
.set BLK7, %xmm13
.set BLK8, %xmm0
.set BLK9, %xmm2
.set BLK10, %xmm7
.set BLK11, %xmm8
.set BLK12, %xmm9
.set BLK13, %xmm14
/**
 * Function description: AES encryption in ECB mode, AES-NI accelerated.
 * Function prototype: int32_t CRYPT_AES_ECB_Encrypt(const CRYPT_AES_Key *ctx,
 *                                                   const uint8_t *in, uint8_t *out, uint32_t len);
 * Input registers (System V AMD64 ABI):
 *     rdi: pointer to the key structure (round keys; round count at offset 240).
 *     rsi: pointer to the input data.
 *     rdx: pointer to the output buffer.
 *     ecx: data length in bytes (expected to be a multiple of the 16-byte block size).
 * Clobbered registers: r9, eax, xmm0-xmm14.
 * Output register: eax (always 0).
 * Function/Macro Call: AES_ENC_1_BLK, AES_ENC_2_BLKS ... AES_ENC_8_BLKS, AES_ENC_14_BLKS.
 */
.globl CRYPT_AES_ECB_Encrypt
.type CRYPT_AES_ECB_Encrypt, @function
CRYPT_AES_ECB_Encrypt:
.cfi_startproc
.align 16
/* Dispatch on the remaining byte length: 1-8 blocks get dedicated tails,
 * 128 < len <= 256 uses the 8-block loop, len > 256 uses the 14-block loop. */
.Lecb_aesenc_start:
cmpl $64, ARG4
jae .Lecb_enc_above_equal_4_blks
cmpl $32, ARG4
jae .Lecb_enc_above_equal_2_blks
cmpl $0, ARG4
/* Fix: jump to this function's own epilogue. The original jumped to
 * .Lecb_aesdec_finish inside CRYPT_AES_ECB_Decrypt, which only worked
 * because both functions happen to live in this translation unit. */
je .Lecb_aesenc_finish
jmp .Lecb_enc_proc_1_blk
.Lecb_enc_above_equal_2_blks:
cmpl $48, ARG4
jb .Lecb_enc_proc_2_blks
jmp .Lecb_enc_proc_3_blks
.Lecb_enc_above_equal_4_blks:
cmpl $96, ARG4
jae .Lecb_enc_above_equal_6_blks
cmpl $80, ARG4
jb .Lecb_enc_proc_4_blks
jmp .Lecb_enc_proc_5_blks
.Lecb_enc_above_equal_6_blks:
cmpl $112, ARG4
jb .Lecb_enc_proc_6_blks
cmpl $128, ARG4
jb .Lecb_enc_proc_7_blks
cmpl $256, ARG4
jbe .Lecb_enc_proc_8_blks
.align 16
/* Fix: ".L" local-label prefix added (was .ecb_enc_proc_14_blks) so the
 * label stays out of the object's symbol table, like every other label
 * in this function. It has no references; control falls through. */
.Lecb_enc_proc_14_blks:
.Lecb_aesenc_14_blks_loop:
movq KEY, KTMP
vmovdqu (KEY), RDK
movl 240(KEY), ROUNDS
/* Whitening: XOR each input block with round key 0. */
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
vpxor 96(ARG2), RDK, BLK6
vpxor 112(ARG2), RDK, BLK7
vpxor 128(ARG2), RDK, BLK8
vpxor 144(ARG2), RDK, BLK9
vpxor 160(ARG2), RDK, BLK10
vpxor 176(ARG2), RDK, BLK11
vpxor 192(ARG2), RDK, BLK12
vpxor 208(ARG2), RDK, BLK13
decl ROUNDS
AES_ENC_14_BLKS ARG2 KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5 BLK6 BLK7 BLK8 BLK9 BLK10 BLK11 BLK12 BLK13
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
vmovdqu BLK6, 96(ARG3)
vmovdqu BLK7, 112(ARG3)
vmovdqu BLK8, 128(ARG3)
vmovdqu BLK9, 144(ARG3)
vmovdqu BLK10, 160(ARG3)
vmovdqu BLK11, 176(ARG3)
vmovdqu BLK12, 192(ARG3)
vmovdqu BLK13, 208(ARG3)
/* Advance in/out by 14 blocks (224 bytes) and loop or re-dispatch. */
leaq 224(ARG2), ARG2
leaq 224(ARG3), ARG3
subl $224, ARG4
cmpl $224, ARG4
jb .Lecb_aesenc_start
jmp .Lecb_aesenc_14_blks_loop
.align 16
.Lecb_enc_proc_8_blks:
.Lecb_aesenc_8_blks_loop:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
movq KEY, KTMP
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
vpxor 96(ARG2), RDK, BLK6
vpxor 112(ARG2), RDK, BLK7
decl ROUNDS
AES_ENC_8_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5 BLK6 BLK7
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
vmovdqu BLK6, 96(ARG3)
vmovdqu BLK7, 112(ARG3)
leaq 128(ARG2), ARG2
leaq 128(ARG3), ARG3
subl $128, ARG4
cmpl $128, ARG4
jb .Lecb_aesenc_start
jmp .Lecb_aesenc_8_blks_loop
.align 16
.Lecb_enc_proc_1_blk:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
decl ROUNDS
AES_ENC_1_BLK KEY ROUNDS RDK BLK0
vmovdqu BLK0, (ARG3)
jmp .Lecb_aesenc_finish
.align 16
.Lecb_enc_proc_2_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
decl ROUNDS
AES_ENC_2_BLKS KEY ROUNDS RDK BLK0 BLK1
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
jmp .Lecb_aesenc_finish
.align 16
.Lecb_enc_proc_3_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
decl ROUNDS
AES_ENC_3_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
jmp .Lecb_aesenc_finish
.align 16
.Lecb_enc_proc_4_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
decl ROUNDS
AES_ENC_4_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
jmp .Lecb_aesenc_finish
.align 16
.Lecb_enc_proc_5_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
decl ROUNDS
AES_ENC_5_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
jmp .Lecb_aesenc_finish
.align 16
.Lecb_enc_proc_6_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
decl ROUNDS
AES_ENC_6_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
jmp .Lecb_aesenc_finish
.align 16
.Lecb_enc_proc_7_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
vpxor 96(ARG2), RDK, BLK6
decl ROUNDS
AES_ENC_7_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5 BLK6
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
vmovdqu BLK6, 96(ARG3)
.align 16
/* Epilogue: scrub the round-key register and return success (0). */
.Lecb_aesenc_finish:
vpxor RDK, RDK, RDK
movl $0, RET
ret
.cfi_endproc
.size CRYPT_AES_ECB_Encrypt, .-CRYPT_AES_ECB_Encrypt
/**
 * Function description: AES decryption in ECB mode, AES-NI accelerated.
 * Function prototype: int32_t CRYPT_AES_ECB_Decrypt(const CRYPT_AES_Key *ctx,
 *                                                   const uint8_t *in, uint8_t *out, uint32_t len);
 * Input registers (System V AMD64 ABI):
 *     rdi: pointer to the key structure (round keys; round count at offset 240).
 *     rsi: pointer to the input data.
 *     rdx: pointer to the output buffer.
 *     ecx: data length in bytes (expected to be a multiple of the 16-byte block size).
 * Clobbered registers: r9, eax, xmm0-xmm14.
 * Output register: eax (always 0).
 * Function/Macro Call: AES_DEC_1_BLK, AES_DEC_2_BLKS ... AES_DEC_8_BLKS, AES_DEC_14_BLKS.
 */
.globl CRYPT_AES_ECB_Decrypt
.type CRYPT_AES_ECB_Decrypt, @function
CRYPT_AES_ECB_Decrypt:
.cfi_startproc
.align 16
/* Fix: every internal label now carries the ".L" local-symbol prefix
 * (as the encrypt path already does) so the labels stay out of the
 * object's symbol table. Pure rename; control flow is unchanged.
 * Dispatch on the remaining byte length, mirroring the encrypt path. */
.Lecb_aesdec_start:
cmpl $64, ARG4
jae .Lecb_dec_above_equal_4_blks
cmpl $32, ARG4
jae .Lecb_dec_above_equal_2_blks
cmpl $0, ARG4
je .Lecb_aesdec_finish
jmp .Lecb_dec_proc_1_blk
.Lecb_dec_above_equal_2_blks:
cmpl $48, ARG4
jb .Lecb_dec_proc_2_blks
jmp .Lecb_dec_proc_3_blks
.Lecb_dec_above_equal_4_blks:
cmpl $96, ARG4
jae .Lecb_dec_above_equal_6_blks
cmpl $80, ARG4
jb .Lecb_dec_proc_4_blks
jmp .Lecb_dec_proc_5_blks
.Lecb_dec_above_equal_6_blks:
cmpl $112, ARG4
jb .Lecb_dec_proc_6_blks
cmpl $128, ARG4
jb .Lecb_dec_proc_7_blks
cmpl $256, ARG4
jbe .Lecb_dec_proc_8_blks
.align 16
.Lecb_dec_proc_14_blks:
.Lecb_aesdec_14_blks_loop:
movq KEY, KTMP
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
/* Whitening: XOR each ciphertext block with round key 0. */
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
vpxor 96(ARG2), RDK, BLK6
vpxor 112(ARG2), RDK, BLK7
vpxor 128(ARG2), RDK, BLK8
vpxor 144(ARG2), RDK, BLK9
vpxor 160(ARG2), RDK, BLK10
vpxor 176(ARG2), RDK, BLK11
vpxor 192(ARG2), RDK, BLK12
vpxor 208(ARG2), RDK, BLK13
decl ROUNDS
AES_DEC_14_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5 BLK6 BLK7 BLK8 BLK9 BLK10 BLK11 BLK12 BLK13
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
vmovdqu BLK6, 96(ARG3)
vmovdqu BLK7, 112(ARG3)
vmovdqu BLK8, 128(ARG3)
vmovdqu BLK9, 144(ARG3)
vmovdqu BLK10, 160(ARG3)
vmovdqu BLK11, 176(ARG3)
vmovdqu BLK12, 192(ARG3)
vmovdqu BLK13, 208(ARG3)
/* Advance in/out by 14 blocks (224 bytes) and loop or re-dispatch. */
leaq 224(ARG2), ARG2
leaq 224(ARG3), ARG3
subl $224, ARG4
cmpl $224, ARG4
jb .Lecb_aesdec_start
jmp .Lecb_aesdec_14_blks_loop
.align 16
.Lecb_dec_proc_8_blks:
.Laesecbdec_8_blks_loop:
movq KEY, KTMP
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
vpxor 96(ARG2), RDK, BLK6
vpxor 112(ARG2), RDK, BLK7
decl ROUNDS
AES_DEC_8_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5 BLK6 BLK7
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
vmovdqu BLK6, 96(ARG3)
vmovdqu BLK7, 112(ARG3)
leaq 128(ARG2), ARG2
leaq 128(ARG3), ARG3
subl $128, ARG4
cmpl $128, ARG4
jb .Lecb_aesdec_start
jmp .Laesecbdec_8_blks_loop
.align 16
.Lecb_dec_proc_1_blk:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
decl ROUNDS
AES_DEC_1_BLK KEY ROUNDS RDK BLK0
vmovdqu BLK0, (ARG3)
jmp .Lecb_aesdec_finish
.align 16
.Lecb_dec_proc_2_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
decl ROUNDS
AES_DEC_2_BLKS KEY ROUNDS RDK BLK0 BLK1
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
jmp .Lecb_aesdec_finish
.align 16
.Lecb_dec_proc_3_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
decl ROUNDS
AES_DEC_3_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
jmp .Lecb_aesdec_finish
.align 16
.Lecb_dec_proc_4_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
decl ROUNDS
AES_DEC_4_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
jmp .Lecb_aesdec_finish
.align 16
.Lecb_dec_proc_5_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
decl ROUNDS
AES_DEC_5_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
jmp .Lecb_aesdec_finish
.align 16
.Lecb_dec_proc_6_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
decl ROUNDS
AES_DEC_6_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
jmp .Lecb_aesdec_finish
.align 16
.Lecb_dec_proc_7_blks:
movl 240(KEY), ROUNDS
vmovdqu (KEY), RDK
vpxor (ARG2), RDK, BLK0
vpxor 16(ARG2), RDK, BLK1
vpxor 32(ARG2), RDK, BLK2
vpxor 48(ARG2), RDK, BLK3
vpxor 64(ARG2), RDK, BLK4
vpxor 80(ARG2), RDK, BLK5
vpxor 96(ARG2), RDK, BLK6
decl ROUNDS
AES_DEC_7_BLKS KEY ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4 BLK5 BLK6
vmovdqu BLK0, (ARG3)
vmovdqu BLK1, 16(ARG3)
vmovdqu BLK2, 32(ARG3)
vmovdqu BLK3, 48(ARG3)
vmovdqu BLK4, 64(ARG3)
vmovdqu BLK5, 80(ARG3)
vmovdqu BLK6, 96(ARG3)
.align 16
/* Epilogue: scrub all registers that held plaintext or round keys,
 * then return success (0). */
.Lecb_aesdec_finish:
vpxor BLK0, BLK0, BLK0
vpxor BLK1, BLK1, BLK1
vpxor BLK2, BLK2, BLK2
vpxor BLK3, BLK3, BLK3
vpxor BLK4, BLK4, BLK4
vpxor BLK5, BLK5, BLK5
vpxor BLK6, BLK6, BLK6
vpxor BLK7, BLK7, BLK7
vpxor BLK8, BLK8, BLK8
vpxor BLK9, BLK9, BLK9
vpxor BLK10, BLK10, BLK10
vpxor BLK11, BLK11, BLK11
vpxor BLK12, BLK12, BLK12
vpxor BLK13, BLK13, BLK13
vpxor RDK, RDK, RDK
movl $0, RET
ret
.cfi_endproc
.size CRYPT_AES_ECB_Decrypt, .-CRYPT_AES_ECB_Decrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_ecb_x86_64.S | Unix Assembly | unknown | 14,681 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
.file "crypt_aes_macro_armv8.s"
.text
.arch armv8-a+crypto
/* NOTE(review): file-scope alias; not referenced by the macros in this
 * file — confirm it is used by the including translation units. */
BLK0 .req v0
/*
 * AES_ENC_1_BLK: encrypt one 16-byte block with the AESE/AESMC pair.
 * \key: round-key schedule; round count stored at [\key,#240]. The
 * pointer is advanced past the schedule and \rounds is clobbered.
 * Two rounds are unrolled per loop iteration; the tail performs the
 * final AESE (no MixColumns) and XORs the last round key.
 * The loop label is a fixed name, so the macro must not be expanded
 * more than once per file.
 */
.macro AES_ENC_1_BLK key blk rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
/* Two rounds are consumed per iteration; reserve the last two for the tail. */
sub \rounds,\rounds,#2
.Loop_enc:
aese \blk,\rdk0
aesmc \blk,\blk
subs \rounds,\rounds,#2
ld1 {\rdk0s},[\key],#16
aese \blk,\rdk1
aesmc \blk,\blk
ld1 {\rdk1s},[\key],#16
b.gt .Loop_enc
/* Final two rounds: last AESE is not followed by AESMC; XOR final key. */
aese \blk,\rdk0
aesmc \blk,\blk
ld1 {\rdk0s},[\key]
aese \blk,\rdk1
eor \blk,\blk,\rdk0
.endm
/*
 * AES_DEC_1_BLK: decrypt one 16-byte block with the AESD/AESIMC pair.
 * \key: decryption round-key schedule (presumably the equivalent-inverse
 * schedule produced via the SETDECKEY_* macros below — confirm at call
 * sites); round count at [\key,#240]. \key and \rounds are clobbered.
 * Fixed loop label: expand at most once per file.
 */
.macro AES_DEC_1_BLK key blk rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_dec:
aesd \blk,\rdk0
aesimc \blk,\blk
subs \rounds,\rounds,#2
ld1 {\rdk0s},[\key],#16
aesd \blk,\rdk1
aesimc \blk,\blk
ld1 {\rdk1s},[\key],#16
b.gt .Loop_dec
/* Final two rounds: last AESD has no InvMixColumns; XOR final key. */
aesd \blk,\rdk0
aesimc \blk,\blk
ld1 {\rdk0s},[\key]
aesd \blk,\rdk1
eor \blk,\blk,\rdk0
.endm
/* SETDECKEY_LDR_9_BLOCK: load nine consecutive 16-byte round keys from
 * \PTR into v1-v9, post-incrementing \PTR by 16 per load. */
.macro SETDECKEY_LDR_9_BLOCK PTR
ld1 {v1.4s}, [\PTR], #16
ld1 {v2.4s}, [\PTR], #16
ld1 {v3.4s}, [\PTR], #16
ld1 {v4.4s}, [\PTR], #16
ld1 {v5.4s}, [\PTR], #16
ld1 {v6.4s}, [\PTR], #16
ld1 {v7.4s}, [\PTR], #16
ld1 {v8.4s}, [\PTR], #16
ld1 {v9.4s}, [\PTR], #16
.endm
/* SETDECKEY_INVMIX_9_BLOCK: apply AESIMC (InvMixColumns) to the nine
 * round keys held in v1-v9, as needed to build an equivalent-inverse
 * decryption key schedule. */
.macro SETDECKEY_INVMIX_9_BLOCK
aesimc v1.16b, v1.16b
aesimc v2.16b, v2.16b
aesimc v3.16b, v3.16b
aesimc v4.16b, v4.16b
aesimc v5.16b, v5.16b
aesimc v6.16b, v6.16b
aesimc v7.16b, v7.16b
aesimc v8.16b, v8.16b
aesimc v9.16b, v9.16b
.endm
/* SETDECKEY_STR_9_BLOCK: store the nine round keys in v1-v9 at \PTR,
 * advancing \PTR by register \OFFSETREG after each store.
 * NOTE(review): \OFFSETREG presumably holds a negative stride when the
 * schedule is written out in reverse — confirm at the call sites. */
.macro SETDECKEY_STR_9_BLOCK PTR OFFSETREG
st1 {v1.4s}, [\PTR], \OFFSETREG
st1 {v2.4s}, [\PTR], \OFFSETREG
st1 {v3.4s}, [\PTR], \OFFSETREG
st1 {v4.4s}, [\PTR], \OFFSETREG
st1 {v5.4s}, [\PTR], \OFFSETREG
st1 {v6.4s}, [\PTR], \OFFSETREG
st1 {v7.4s}, [\PTR], \OFFSETREG
st1 {v8.4s}, [\PTR], \OFFSETREG
st1 {v9.4s}, [\PTR], \OFFSETREG
.endm
/*
 * AES_ENC_2_BLKS: encrypt two 16-byte blocks in parallel (AESE/AESMC).
 * \key: round-key schedule, round count at [\key,#240]; advanced past
 * the schedule. \rounds is clobbered. Two rounds per loop iteration;
 * the tail does the final AESE (no MixColumns) plus the last-key XOR.
 * Fixed loop label: expand at most once per file.
 */
.macro AES_ENC_2_BLKS key blk0 blk1 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_enc_2_blks:
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk0,\rdk1
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk1,\rdk1
aesmc \blk1,\blk1
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_enc_2_blks
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
ld1 {\rdk0s},[\key]
aese \blk0,\rdk1
aese \blk1,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
.endm
/*
 * AES_ENC_3_BLKS: encrypt three 16-byte blocks in parallel (AESE/AESMC).
 * Same contract as AES_ENC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_ENC_3_BLKS key blk0 blk1 blk2 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.align 3
.Loop_enc_3_blks:
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk0,\rdk1
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk1,\rdk1
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk2,\rdk1
aesmc \blk2,\blk2
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_enc_3_blks
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
ld1 {\rdk0s},[\key]
aese \blk0,\rdk1
aese \blk1,\rdk1
aese \blk2,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
.endm
/*
 * AES_ENC_4_BLKS: encrypt four 16-byte blocks in parallel (AESE/AESMC).
 * Same contract as AES_ENC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_ENC_4_BLKS key blk0 blk1 blk2 blk3 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_enc_4_blks:
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk0,\rdk1
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk1,\rdk1
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk2,\rdk1
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk3,\rdk1
aesmc \blk3,\blk3
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_enc_4_blks
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
ld1 {\rdk0s},[\key]
aese \blk0,\rdk1
aese \blk1,\rdk1
aese \blk2,\rdk1
aese \blk3,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
.endm
/*
 * AES_ENC_5_BLKS: encrypt five 16-byte blocks in parallel (AESE/AESMC).
 * Same contract as AES_ENC_2_BLKS, but the round-key loads are split
 * across the loop body (one 16-byte load per half) rather than a single
 * 32-byte load. Round count at [\key,#240]; \key and \rounds clobbered.
 * Fixed loop label: expand at most once per file.
 */
.macro AES_ENC_5_BLKS key blk0 blk1 blk2 blk3 blk4 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_enc_5_blks:
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk4,\rdk0
aesmc \blk4,\blk4
ld1 {\rdk0s},[\key],#16
subs \rounds,\rounds,#2
aese \blk0,\rdk1
aesmc \blk0,\blk0
aese \blk1,\rdk1
aesmc \blk1,\blk1
aese \blk2,\rdk1
aesmc \blk2,\blk2
aese \blk3,\rdk1
aesmc \blk3,\blk3
aese \blk4,\rdk1
aesmc \blk4,\blk4
ld1 {\rdk1s},[\key],#16
b.gt .Loop_enc_5_blks
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk4,\rdk0
aesmc \blk4,\blk4
ld1 {\rdk0s},[\key]
aese \blk0,\rdk1
aese \blk1,\rdk1
aese \blk2,\rdk1
aese \blk3,\rdk1
aese \blk4,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
eor \blk4,\blk4,\rdk0
.endm
/*
 * AES_ENC_6_BLKS: encrypt six 16-byte blocks in parallel (AESE/AESMC).
 * Same contract as AES_ENC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_ENC_6_BLKS key blk0 blk1 blk2 blk3 blk4 blk5 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_enc_6_blks:
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk0,\rdk1
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk1,\rdk1
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk2,\rdk1
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk3,\rdk1
aesmc \blk3,\blk3
aese \blk4,\rdk0
aesmc \blk4,\blk4
aese \blk4,\rdk1
aesmc \blk4,\blk4
aese \blk5,\rdk0
aesmc \blk5,\blk5
aese \blk5,\rdk1
aesmc \blk5,\blk5
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_enc_6_blks
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk4,\rdk0
aesmc \blk4,\blk4
aese \blk5,\rdk0
aesmc \blk5,\blk5
ld1 {\rdk0s},[\key]
aese \blk0,\rdk1
aese \blk1,\rdk1
aese \blk2,\rdk1
aese \blk3,\rdk1
aese \blk4,\rdk1
aese \blk5,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
eor \blk4,\blk4,\rdk0
eor \blk5,\blk5,\rdk0
.endm
/*
 * AES_ENC_7_BLKS: encrypt seven 16-byte blocks in parallel (AESE/AESMC).
 * Same contract as AES_ENC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_ENC_7_BLKS key blk0 blk1 blk2 blk3 blk4 blk5 blk6 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_enc_7_blks:
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk0,\rdk1
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk1,\rdk1
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk2,\rdk1
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk3,\rdk1
aesmc \blk3,\blk3
aese \blk4,\rdk0
aesmc \blk4,\blk4
aese \blk4,\rdk1
aesmc \blk4,\blk4
aese \blk5,\rdk0
aesmc \blk5,\blk5
aese \blk5,\rdk1
aesmc \blk5,\blk5
aese \blk6,\rdk0
aesmc \blk6,\blk6
aese \blk6,\rdk1
aesmc \blk6,\blk6
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_enc_7_blks
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk4,\rdk0
aesmc \blk4,\blk4
aese \blk5,\rdk0
aesmc \blk5,\blk5
aese \blk6,\rdk0
aesmc \blk6,\blk6
ld1 {\rdk0s},[\key]
aese \blk0,\rdk1
aese \blk1,\rdk1
aese \blk2,\rdk1
aese \blk3,\rdk1
aese \blk4,\rdk1
aese \blk5,\rdk1
aese \blk6,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
eor \blk4,\blk4,\rdk0
eor \blk5,\blk5,\rdk0
eor \blk6,\blk6,\rdk0
.endm
/*
 * AES_ENC_8_BLKS: encrypt eight 16-byte blocks in parallel (AESE/AESMC).
 * Same contract as AES_ENC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_ENC_8_BLKS key blk0 blk1 blk2 blk3 blk4 blk5 blk6 blk7 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_enc_8_blks:
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk0,\rdk1
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk1,\rdk1
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk2,\rdk1
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk3,\rdk1
aesmc \blk3,\blk3
aese \blk4,\rdk0
aesmc \blk4,\blk4
aese \blk4,\rdk1
aesmc \blk4,\blk4
aese \blk5,\rdk0
aesmc \blk5,\blk5
aese \blk5,\rdk1
aesmc \blk5,\blk5
aese \blk6,\rdk0
aesmc \blk6,\blk6
aese \blk6,\rdk1
aesmc \blk6,\blk6
aese \blk7,\rdk0
aesmc \blk7,\blk7
aese \blk7,\rdk1
aesmc \blk7,\blk7
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_enc_8_blks
aese \blk0,\rdk0
aesmc \blk0,\blk0
aese \blk1,\rdk0
aesmc \blk1,\blk1
aese \blk2,\rdk0
aesmc \blk2,\blk2
aese \blk3,\rdk0
aesmc \blk3,\blk3
aese \blk4,\rdk0
aesmc \blk4,\blk4
aese \blk5,\rdk0
aesmc \blk5,\blk5
aese \blk6,\rdk0
aesmc \blk6,\blk6
aese \blk7,\rdk0
aesmc \blk7,\blk7
ld1 {\rdk0s},[\key]
aese \blk0,\rdk1
aese \blk1,\rdk1
aese \blk2,\rdk1
aese \blk3,\rdk1
aese \blk4,\rdk1
aese \blk5,\rdk1
aese \blk6,\rdk1
aese \blk7,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
eor \blk4,\blk4,\rdk0
eor \blk5,\blk5,\rdk0
eor \blk6,\blk6,\rdk0
eor \blk7,\blk7,\rdk0
.endm
/*
 * AES_DEC_2_BLKS: decrypt two 16-byte blocks in parallel (AESD/AESIMC).
 * \key: decryption round-key schedule, round count at [\key,#240]; \key
 * and \rounds are clobbered. Two rounds per loop iteration; the tail
 * does the final AESD (no InvMixColumns) plus the last-key XOR.
 * Fixed loop label: expand at most once per file.
 */
.macro AES_DEC_2_BLKS key blk0 blk1 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_dec_2_blks:
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk0,\rdk1
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk1,\rdk1
aesimc \blk1,\blk1
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_dec_2_blks
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
ld1 {\rdk0s},[\key]
aesd \blk0,\rdk1
aesd \blk1,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
.endm
/*
 * AES_DEC_3_BLKS: decrypt three 16-byte blocks in parallel (AESD/AESIMC).
 * Same contract as AES_DEC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_DEC_3_BLKS key blk0 blk1 blk2 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.align 3
.Loop_dec_3_blks:
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk0,\rdk1
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk1,\rdk1
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk2,\rdk1
aesimc \blk2,\blk2
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_dec_3_blks
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
ld1 {\rdk0s},[\key]
aesd \blk0,\rdk1
aesd \blk1,\rdk1
aesd \blk2,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
.endm
/*
 * AES_DEC_4_BLKS: decrypt four 16-byte blocks in parallel (AESD/AESIMC).
 * Same contract as AES_DEC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_DEC_4_BLKS key blk0 blk1 blk2 blk3 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_dec_4_blks:
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk0,\rdk1
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk1,\rdk1
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk2,\rdk1
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk3,\rdk1
aesimc \blk3,\blk3
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_dec_4_blks
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
ld1 {\rdk0s},[\key]
aesd \blk0,\rdk1
aesd \blk1,\rdk1
aesd \blk2,\rdk1
aesd \blk3,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
.endm
/*
 * AES_DEC_5_BLKS: decrypt five 16-byte blocks in parallel (AESD/AESIMC).
 * Same contract as AES_DEC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_DEC_5_BLKS key blk0 blk1 blk2 blk3 blk4 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_dec_5_blks:
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk0,\rdk1
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk1,\rdk1
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk2,\rdk1
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk3,\rdk1
aesimc \blk3,\blk3
aesd \blk4,\rdk0
aesimc \blk4,\blk4
aesd \blk4,\rdk1
aesimc \blk4,\blk4
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_dec_5_blks
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk4,\rdk0
aesimc \blk4,\blk4
ld1 {\rdk0s},[\key]
aesd \blk0,\rdk1
aesd \blk1,\rdk1
aesd \blk2,\rdk1
aesd \blk3,\rdk1
aesd \blk4,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
eor \blk4,\blk4,\rdk0
.endm
/*
 * AES_DEC_6_BLKS: decrypt six 16-byte blocks in parallel (AESD/AESIMC).
 * Same contract as AES_DEC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_DEC_6_BLKS key blk0 blk1 blk2 blk3 blk4 blk5 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_dec_6_blks:
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk0,\rdk1
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk1,\rdk1
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk2,\rdk1
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk3,\rdk1
aesimc \blk3,\blk3
aesd \blk4,\rdk0
aesimc \blk4,\blk4
aesd \blk4,\rdk1
aesimc \blk4,\blk4
aesd \blk5,\rdk0
aesimc \blk5,\blk5
aesd \blk5,\rdk1
aesimc \blk5,\blk5
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_dec_6_blks
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk4,\rdk0
aesimc \blk4,\blk4
aesd \blk5,\rdk0
aesimc \blk5,\blk5
ld1 {\rdk0s},[\key]
aesd \blk0,\rdk1
aesd \blk1,\rdk1
aesd \blk2,\rdk1
aesd \blk3,\rdk1
aesd \blk4,\rdk1
aesd \blk5,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
eor \blk4,\blk4,\rdk0
eor \blk5,\blk5,\rdk0
.endm
/*
 * AES_DEC_7_BLKS: decrypt seven 16-byte blocks in parallel (AESD/AESIMC).
 * Same contract as AES_DEC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. Fixed loop label: expand at most once per file.
 */
.macro AES_DEC_7_BLKS key blk0 blk1 blk2 blk3 blk4 blk5 blk6 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.Loop_dec_7_blks:
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk0,\rdk1
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk1,\rdk1
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk2,\rdk1
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk3,\rdk1
aesimc \blk3,\blk3
aesd \blk4,\rdk0
aesimc \blk4,\blk4
aesd \blk4,\rdk1
aesimc \blk4,\blk4
aesd \blk5,\rdk0
aesimc \blk5,\blk5
aesd \blk5,\rdk1
aesimc \blk5,\blk5
aesd \blk6,\rdk0
aesimc \blk6,\blk6
aesd \blk6,\rdk1
aesimc \blk6,\blk6
ld1 {\rdk0s,\rdk1s},[\key],#32
subs \rounds,\rounds,#2
b.gt .Loop_dec_7_blks
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk4,\rdk0
aesimc \blk4,\blk4
aesd \blk5,\rdk0
aesimc \blk5,\blk5
aesd \blk6,\rdk0
aesimc \blk6,\blk6
ld1 {\rdk0s},[\key]
aesd \blk0,\rdk1
aesd \blk1,\rdk1
aesd \blk2,\rdk1
aesd \blk3,\rdk1
aesd \blk4,\rdk1
aesd \blk5,\rdk1
aesd \blk6,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
eor \blk4,\blk4,\rdk0
eor \blk5,\blk5,\rdk0
eor \blk6,\blk6,\rdk0
.endm
/*
 * AES_DEC_8_BLKS: decrypt eight 16-byte blocks in parallel (AESD/AESIMC).
 * Same contract as AES_DEC_2_BLKS: round count at [\key,#240]; \key and
 * \rounds clobbered. The per-block instruction order inside the loop is
 * interleaved (0,5,1,6,2,3,4,7), presumably to hide instruction latency
 * — do not "tidy" it without benchmarking.
 * Fixed loop label: expand at most once per file.
 */
.macro AES_DEC_8_BLKS key blk0 blk1 blk2 blk3 blk4 blk5 blk6 blk7 rdk0s rdk1s rdk0 rdk1 rounds
ldr \rounds,[\key,#240]
ld1 {\rdk0s,\rdk1s},[\key],#32
sub \rounds,\rounds,#2
.align 5
.Loop_dec_8_blks:
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk5,\rdk0
aesimc \blk5,\blk5
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk6,\rdk0
aesimc \blk6,\blk6
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk4,\rdk0
aesimc \blk4,\blk4
aesd \blk7,\rdk0
aesimc \blk7,\blk7
aesd \blk0,\rdk1
aesimc \blk0,\blk0
aesd \blk5,\rdk1
aesimc \blk5,\blk5
aesd \blk1,\rdk1
aesimc \blk1,\blk1
aesd \blk6,\rdk1
aesimc \blk6,\blk6
aesd \blk2,\rdk1
aesimc \blk2,\blk2
aesd \blk3,\rdk1
aesimc \blk3,\blk3
aesd \blk4,\rdk1
aesimc \blk4,\blk4
aesd \blk7,\rdk1
ld1 {\rdk0s, \rdk1s},[\key],#32
aesimc \blk7,\blk7
subs \rounds,\rounds,#2
b.gt .Loop_dec_8_blks
aesd \blk0,\rdk0
aesimc \blk0,\blk0
aesd \blk1,\rdk0
aesimc \blk1,\blk1
aesd \blk2,\rdk0
aesimc \blk2,\blk2
aesd \blk3,\rdk0
aesimc \blk3,\blk3
aesd \blk4,\rdk0
aesimc \blk4,\blk4
aesd \blk5,\rdk0
aesimc \blk5,\blk5
aesd \blk6,\rdk0
aesimc \blk6,\blk6
aesd \blk7,\rdk0
ld1 {\rdk0s},[\key]
aesimc \blk7,\blk7
aesd \blk0,\rdk1
aesd \blk1,\rdk1
aesd \blk2,\rdk1
aesd \blk3,\rdk1
aesd \blk4,\rdk1
aesd \blk5,\rdk1
aesd \blk6,\rdk1
aesd \blk7,\rdk1
eor \blk0,\blk0,\rdk0
eor \blk1,\blk1,\rdk0
eor \blk2,\blk2,\rdk0
eor \blk3,\blk3,\rdk0
eor \blk4,\blk4,\rdk0
eor \blk5,\blk5,\rdk0
eor \blk6,\blk6,\rdk0
eor \blk7,\blk7,\rdk0
.endm
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_macro_armv8.s | Unix Assembly | unknown | 19,831 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
.file "crypt_aes_macro_x86_64.s"
/* AES_ENC_1_BLK */
.macro AES_ENC_1_BLK key round rdk blk
/* Encrypt one 16-byte block held in \blk.
 * On entry \key points at round key 0 and \round holds the number of middle
 * rounds (Nr - 1); the caller is expected to have already XORed round key 0
 * into \blk (see CRYPT_AES_Encrypt, which does vpxor (key) before the rounds).
 * \key, \round and \rdk are clobbered; on exit \key points at the last round key. */
.align 16
.Laesenc_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk
decl \round
jnz .Laesenc_loop
/* Final round: aesenclast omits MixColumns. */
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk
.endm
/* AES_ENC_2_BLKS */
.macro AES_ENC_2_BLKS key round rdk blk0 blk1
/* Encrypt two independent 16-byte blocks with one shared key schedule.
 * \key points at round key 0 on entry (round key 0 must already be applied
 * by the caller); \round = Nr - 1 middle rounds. \key, \round and \rdk are
 * clobbered. The two aesenc streams are independent, so their order within
 * each round is interchangeable. */
.align 16
.Laesenc_two_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk1
aesenc \rdk, \blk0
decl \round
jnz .Laesenc_two_loop
/* Last round without MixColumns. */
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk1
aesenclast \rdk, \blk0
.endm
/* AES_ENC_3_BLKS */
.macro AES_ENC_3_BLKS key round rdk blk0 blk1 blk2
/* Encrypt three independent blocks in parallel; same calling convention as
 * AES_ENC_1_BLK (\key at round key 0, \round = Nr - 1, round key 0 already
 * applied by the caller; \key/\round/\rdk clobbered). */
.align 16
.Laesenc_3_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk0
aesenc \rdk, \blk1
aesenc \rdk, \blk2
decl \round
jnz .Laesenc_3_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk0
aesenclast \rdk, \blk1
aesenclast \rdk, \blk2
.endm
/* AES_ENC_4_BLKS */
.macro AES_ENC_4_BLKS key round rdk blk0 blk1 blk2 blk3
/* Encrypt four independent blocks in parallel; same contract as AES_ENC_1_BLK. */
.align 16
.Laesenc_4_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk0
aesenc \rdk, \blk1
aesenc \rdk, \blk2
aesenc \rdk, \blk3
decl \round
jnz .Laesenc_4_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk0
aesenclast \rdk, \blk1
aesenclast \rdk, \blk2
aesenclast \rdk, \blk3
.endm
/* AES_ENC_5_BLKS */
.macro AES_ENC_5_BLKS key round rdk blk0 blk1 blk2 blk3 blk4
/* Encrypt five independent blocks in parallel; same contract as AES_ENC_1_BLK. */
.align 16
.Laesenc_5_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk0
aesenc \rdk, \blk1
aesenc \rdk, \blk2
aesenc \rdk, \blk3
aesenc \rdk, \blk4
decl \round
jnz .Laesenc_5_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk0
aesenclast \rdk, \blk1
aesenclast \rdk, \blk2
aesenclast \rdk, \blk3
aesenclast \rdk, \blk4
.endm
/* AES_ENC_6_BLKS */
.macro AES_ENC_6_BLKS key round rdk blk0 blk1 blk2 blk3 blk4 blk5
/* Encrypt six independent blocks in parallel; same contract as AES_ENC_1_BLK. */
.align 16
.Laesenc_6_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk0
aesenc \rdk, \blk1
aesenc \rdk, \blk2
aesenc \rdk, \blk3
aesenc \rdk, \blk4
aesenc \rdk, \blk5
decl \round
jnz .Laesenc_6_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk0
aesenclast \rdk, \blk1
aesenclast \rdk, \blk2
aesenclast \rdk, \blk3
aesenclast \rdk, \blk4
aesenclast \rdk, \blk5
.endm
/* AES_ENC_7_BLKS */
.macro AES_ENC_7_BLKS key round rdk blk0 blk1 blk2 blk3 blk4 blk5 blk6
/* Encrypt seven independent blocks in parallel; same contract as AES_ENC_1_BLK. */
.align 16
.Laesenc_7_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk0
aesenc \rdk, \blk1
aesenc \rdk, \blk2
aesenc \rdk, \blk3
aesenc \rdk, \blk4
aesenc \rdk, \blk5
aesenc \rdk, \blk6
decl \round
jnz .Laesenc_7_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk0
aesenclast \rdk, \blk1
aesenclast \rdk, \blk2
aesenclast \rdk, \blk3
aesenclast \rdk, \blk4
aesenclast \rdk, \blk5
aesenclast \rdk, \blk6
.endm
/* AES_ENC_8_BLKS */
.macro AES_ENC_8_BLKS key round rdk blk0 blk1 blk2 blk3 blk4 blk5 blk6 blk7
/* Encrypt eight independent blocks in parallel; same contract as AES_ENC_1_BLK. */
.align 16
.Laesenc_8_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk0
aesenc \rdk, \blk1
aesenc \rdk, \blk2
aesenc \rdk, \blk3
aesenc \rdk, \blk4
aesenc \rdk, \blk5
aesenc \rdk, \blk6
aesenc \rdk, \blk7
decl \round
jnz .Laesenc_8_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk0
aesenclast \rdk, \blk1
aesenclast \rdk, \blk2
aesenclast \rdk, \blk3
aesenclast \rdk, \blk4
aesenclast \rdk, \blk5
aesenclast \rdk, \blk6
aesenclast \rdk, \blk7
.endm
/* AES_ENC_14_BLKS */
/* NOTE(review): the leading 'ARG2' parameter below is never referenced in the
 * body and is absent from the matching AES_DEC_14_BLKS macro — it looks like a
 * copy-paste leftover. It changes the macro's arity (callers must pass a dummy
 * first argument); confirm against the call sites before removing it. */
.macro AES_ENC_14_BLKS ARG2 key round rdk blk0 blk1 blk2 blk3 blk4 blk5 blk6 blk7 blk8 blk9 blk10 blk11 blk12 blk13
/* Encrypt fourteen independent blocks in parallel; same per-block contract as
 * AES_ENC_1_BLK (\key at round key 0, \round = Nr - 1; \key/\round/\rdk clobbered). */
.align 16
.Laesenc_14_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesenc \rdk, \blk0
aesenc \rdk, \blk1
aesenc \rdk, \blk2
aesenc \rdk, \blk3
aesenc \rdk, \blk4
aesenc \rdk, \blk5
aesenc \rdk, \blk6
aesenc \rdk, \blk7
aesenc \rdk, \blk8
aesenc \rdk, \blk9
aesenc \rdk, \blk10
aesenc \rdk, \blk11
aesenc \rdk, \blk12
aesenc \rdk, \blk13
decl \round
jnz .Laesenc_14_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesenclast \rdk, \blk0
aesenclast \rdk, \blk1
aesenclast \rdk, \blk2
aesenclast \rdk, \blk3
aesenclast \rdk, \blk4
aesenclast \rdk, \blk5
aesenclast \rdk, \blk6
aesenclast \rdk, \blk7
aesenclast \rdk, \blk8
aesenclast \rdk, \blk9
aesenclast \rdk, \blk10
aesenclast \rdk, \blk11
aesenclast \rdk, \blk12
aesenclast \rdk, \blk13
.endm
/* AES_DEC_1_BLK */
.macro AES_DEC_1_BLK key round rdk blk
/* Decrypt one 16-byte block held in \blk (equivalent-inverse-cipher key schedule).
 * On entry \key points at the first round key of the decryption schedule and
 * \round holds the number of middle rounds (Nr - 1); the caller is expected to
 * have already XORed the first round key into \blk (see CRYPT_AES_Decrypt).
 * \key, \round and \rdk are clobbered. */
.align 16
.Laesdec_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk
decl \round
jnz .Laesdec_loop
/* Final round: aesdeclast omits InvMixColumns. */
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk
.endm
/* AES_DEC_2_BLKS */
.macro AES_DEC_2_BLKS key round rdk blk0 blk1
/* Decrypt two independent blocks in parallel; same contract as AES_DEC_1_BLK.
 * Fix: use .align 16 like every other N-block macro in this file (this one
 * alone used .align 32, which only wasted padding and was inconsistent). */
.align 16
.Laesdec_2_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk0
aesdec \rdk, \blk1
decl \round
jnz .Laesdec_2_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk0
aesdeclast \rdk, \blk1
.endm
/* AES_DEC_3_BLKS */
.macro AES_DEC_3_BLKS key round rdk blk0 blk1 blk2
/* Decrypt three independent blocks in parallel; same contract as AES_DEC_1_BLK. */
.align 16
.Laesdec_3_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk0
aesdec \rdk, \blk1
aesdec \rdk, \blk2
decl \round
jnz .Laesdec_3_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk0
aesdeclast \rdk, \blk1
aesdeclast \rdk, \blk2
.endm
/* AES_DEC_4_BLKS */
.macro AES_DEC_4_BLKS key round rdk blk0 blk1 blk2 blk3
/* Decrypt four independent blocks in parallel; same contract as AES_DEC_1_BLK. */
.align 16
.Laesdec_4_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk0
aesdec \rdk, \blk1
aesdec \rdk, \blk2
aesdec \rdk, \blk3
decl \round
jnz .Laesdec_4_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk0
aesdeclast \rdk, \blk1
aesdeclast \rdk, \blk2
aesdeclast \rdk, \blk3
.endm
/* AES_DEC_5_BLKS */
.macro AES_DEC_5_BLKS key round rdk blk0 blk1 blk2 blk3 blk4
/* Decrypt five independent blocks in parallel; same contract as AES_DEC_1_BLK. */
.align 16
.Laesdec_5_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk0
aesdec \rdk, \blk1
aesdec \rdk, \blk2
aesdec \rdk, \blk3
aesdec \rdk, \blk4
decl \round
jnz .Laesdec_5_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk0
aesdeclast \rdk, \blk1
aesdeclast \rdk, \blk2
aesdeclast \rdk, \blk3
aesdeclast \rdk, \blk4
.endm
/* AES_DEC_6_BLKS */
.macro AES_DEC_6_BLKS key round rdk blk0 blk1 blk2 blk3 blk4 blk5
/* Decrypt six independent blocks in parallel; same contract as AES_DEC_1_BLK. */
.align 16
.Laesdec_6_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk0
aesdec \rdk, \blk1
aesdec \rdk, \blk2
aesdec \rdk, \blk3
aesdec \rdk, \blk4
aesdec \rdk, \blk5
decl \round
jnz .Laesdec_6_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk0
aesdeclast \rdk, \blk1
aesdeclast \rdk, \blk2
aesdeclast \rdk, \blk3
aesdeclast \rdk, \blk4
aesdeclast \rdk, \blk5
.endm
/* AES_DEC_7_BLKS */
.macro AES_DEC_7_BLKS key round rdk blk0 blk1 blk2 blk3 blk4 blk5 blk6
/* Decrypt seven independent blocks in parallel; same contract as AES_DEC_1_BLK. */
.align 16
.Laesdec_7_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk0
aesdec \rdk, \blk1
aesdec \rdk, \blk2
aesdec \rdk, \blk3
aesdec \rdk, \blk4
aesdec \rdk, \blk5
aesdec \rdk, \blk6
decl \round
jnz .Laesdec_7_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk0
aesdeclast \rdk, \blk1
aesdeclast \rdk, \blk2
aesdeclast \rdk, \blk3
aesdeclast \rdk, \blk4
aesdeclast \rdk, \blk5
aesdeclast \rdk, \blk6
.endm
/* AES_DEC_8_BLKS */
.macro AES_DEC_8_BLKS key round rdk blk0 blk1 blk2 blk3 blk4 blk5 blk6 blk7
/* Decrypt eight independent blocks in parallel; same contract as AES_DEC_1_BLK. */
.align 16
.Laesdec_8_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk0
aesdec \rdk, \blk1
aesdec \rdk, \blk2
aesdec \rdk, \blk3
aesdec \rdk, \blk4
aesdec \rdk, \blk5
aesdec \rdk, \blk6
aesdec \rdk, \blk7
decl \round
jnz .Laesdec_8_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk0
aesdeclast \rdk, \blk1
aesdeclast \rdk, \blk2
aesdeclast \rdk, \blk3
aesdeclast \rdk, \blk4
aesdeclast \rdk, \blk5
aesdeclast \rdk, \blk6
aesdeclast \rdk, \blk7
.endm
/* AES_DEC_14_BLKS */
.macro AES_DEC_14_BLKS key round rdk blk0 blk1 blk2 blk3 blk4 blk5 blk6 blk7 blk8 blk9 blk10 blk11 blk12 blk13
/* Decrypt fourteen independent blocks in parallel; same contract as AES_DEC_1_BLK.
 * (Unlike AES_ENC_14_BLKS, this macro has no stray leading parameter.) */
.align 16
.Laesdec_14_blks_loop:
leaq 16(\key), \key
movdqu (\key), \rdk
aesdec \rdk, \blk0
aesdec \rdk, \blk1
aesdec \rdk, \blk2
aesdec \rdk, \blk3
aesdec \rdk, \blk4
aesdec \rdk, \blk5
aesdec \rdk, \blk6
aesdec \rdk, \blk7
aesdec \rdk, \blk8
aesdec \rdk, \blk9
aesdec \rdk, \blk10
aesdec \rdk, \blk11
aesdec \rdk, \blk12
aesdec \rdk, \blk13
decl \round
jnz .Laesdec_14_blks_loop
leaq 16(\key), \key
movdqu (\key), \rdk
aesdeclast \rdk, \blk0
aesdeclast \rdk, \blk1
aesdeclast \rdk, \blk2
aesdeclast \rdk, \blk3
aesdeclast \rdk, \blk4
aesdeclast \rdk, \blk5
aesdeclast \rdk, \blk6
aesdeclast \rdk, \blk7
aesdeclast \rdk, \blk8
aesdeclast \rdk, \blk9
aesdeclast \rdk, \blk10
aesdeclast \rdk, \blk11
aesdeclast \rdk, \blk12
aesdeclast \rdk, \blk13
.endm
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_macro_x86_64.s | Unix Assembly | unknown | 10,609 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
#include "crypt_aes_macro_x86_64.s"
.file "crypt_aes_x86_64.S"
.text
.set ARG1, %rdi
.set ARG2, %rsi
.set ARG3, %rdx
.set ARG4, %rcx
.set ARG5, %r8
.set ARG6, %r9
.set RET, %eax
.set XM0, %xmm0
.set XM1, %xmm1
.set XM2, %xmm2
.set XM3, %xmm3
.set XM4, %xmm4
.set XM5, %xmm5
/**
* aes128 macros for key extension processing.
*/
.macro KEY_EXPANSION_HELPER_128 xm0 xm1 xm2
/* Fold an aeskeygenassist result into the previous AES-128 round key:
 * broadcast word 3 of \xm1, XOR the three left-shifted copies of \xm0 into
 * itself (accumulating w[i-4]^w[i-3]^... across the lanes), then XOR in the
 * broadcast value. Result: next round key left in \xm0. \xm2 is scratch. */
vpermilps $0xff, \xm1, \xm1
vpslldq $4, \xm0, \xm2
vpxor \xm2, \xm0, \xm0
vpslldq $4, \xm2, \xm2
vpxor \xm2, \xm0, \xm0
vpslldq $4, \xm2, \xm2
vpxor \xm2, \xm0, \xm0
vpxor \xm1, \xm0, \xm0
.endm
/**
* aes192 macros for key extension processing.
*/
.macro KEY_EXPANSION_HELPER_192 xm1 xm3
/* XOR the three successive left-shifted (by 4, 8, 12 bytes) copies of \xm1
 * into itself — the lane-accumulation step of the AES-192 key expansion.
 * \xm3 is scratch; the caller XORs in the rcon-derived word afterwards. */
vpslldq $4, \xm1, \xm3
vpxor \xm3, \xm1, \xm1
vpslldq $4, \xm3, \xm3
vpxor \xm3, \xm1, \xm1
vpslldq $4, \xm3, \xm3
vpxor \xm3, \xm1, \xm1
.endm
/**
* Function description: Sets the AES encryption key. Key length: 128 bits.
* Function prototype: void SetEncryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key);
* Input register:
* x0:Pointer to the output key structure.
* x1:Pointer to the input key.
* Change register:xmm0-xmm2.
* Output register:None.
* Function/Macro Call: None.
*/
.globl SetEncryptKey128
.type SetEncryptKey128, @function
SetEncryptKey128:
.cfi_startproc
/* ctx->rounds = 10 (offset 240 holds the round count for AES-128). */
movl $10, 240(%rdi)
/* Round key 0 is the raw 128-bit key itself. */
movdqu (ARG2), XM0
movdqu XM0, (ARG1)
/* Rounds 1..10: aeskeygenassist with rcon 0x01,0x02,...,0x36, folded via
 * KEY_EXPANSION_HELPER_128; each round key stored 16 bytes further on. */
aeskeygenassist $0x01, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 16(ARG1)
aeskeygenassist $0x02, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 32(ARG1)
aeskeygenassist $0x04, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 48(ARG1)
aeskeygenassist $0x08, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 64(ARG1)
aeskeygenassist $0x10, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 80(ARG1)
aeskeygenassist $0x20, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 96(ARG1)
aeskeygenassist $0x40, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 112(ARG1)
aeskeygenassist $0x80, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 128(ARG1)
aeskeygenassist $0x1b, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 144(ARG1)
aeskeygenassist $0x36, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
movdqu XM0, 160(ARG1)
/* Scrub key material from the xmm registers before returning. */
vpxor XM0, XM0, XM0
vpxor XM1, XM1, XM1
vpxor XM2, XM2, XM2
ret
.cfi_endproc
.size SetEncryptKey128, .-SetEncryptKey128
/**
* Function description: Sets the AES decryption key. Key length: 128 bits.
* Function prototype: void SetDecryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key);
* Input register:
* x0:Pointer to the output key structure.
* x1:Pointer to the input key.
* Change register:xmm0-xmm3.
* Output register: None.
* Function/Macro Call: None.
*/
.globl SetDecryptKey128
.type SetDecryptKey128, @function
SetDecryptKey128:
.cfi_startproc
movl $10, 240(%rdi)
/* Decryption schedule is stored in reverse order: the raw key goes at the
 * END (offset 160) and the last expanded key at offset 0; intermediate round
 * keys pass through aesimc (InvMixColumns) for the equivalent inverse cipher. */
movdqu (ARG2), XM0
movdqu XM0, 160(ARG1)
aeskeygenassist $0x01, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 144(ARG1)
aeskeygenassist $0x02, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 128(ARG1)
aeskeygenassist $0x04, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 112(ARG1)
aeskeygenassist $0x08, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 96(ARG1)
aeskeygenassist $0x10, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 80(ARG1)
aeskeygenassist $0x20, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 64(ARG1)
aeskeygenassist $0x40, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 48(ARG1)
aeskeygenassist $0x80, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 32(ARG1)
aeskeygenassist $0x1b, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
aesimc XM0, XM3
movdqu XM3, 16(ARG1)
aeskeygenassist $0x36, XM0, XM1
KEY_EXPANSION_HELPER_128 XM0, XM1, XM2
/* First and last round keys are NOT run through aesimc. */
movdqu XM0,(ARG1)
/* Scrub key material from the xmm registers before returning. */
vpxor XM0, XM0, XM0
vpxor XM1, XM1, XM1
vpxor XM2, XM2, XM2
vpxor XM3, XM3, XM3
ret
.cfi_endproc
.size SetDecryptKey128, .-SetDecryptKey128
/**
* Function description: Sets the AES encryption key. Key length: 192 bits.
* Function prototype: void SetEncryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key);
* Input register:
* x0:Pointer to the output key structure.
* x1:Pointer to the input key.
* Change register: xmm0-xmm4.
* Output register: None.
* Function/Macro Call: None.
*/
.globl SetEncryptKey192
.type SetEncryptKey192, @function
SetEncryptKey192:
.cfi_startproc
/* ctx->rounds = 12 for AES-192. The 24-byte key straddles two loads:
 * XM0 = key[0..15], XM1 = key[8..23] (overlapping load at offset 8). */
movl $12, 240(ARG1)
movdqu (ARG2), XM0
movdqu 8(ARG2), XM1
movdqu XM0,(ARG1)
vpxor XM4, XM4, XM4
vshufps $0x40, XM0, XM4, XM2
aeskeygenassist $0x01, XM1, XM0
vshufps $0xf0, XM0, XM4, XM0
vpslldq $0x04, XM2, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM0, XM0
vshufps $0xee, XM0, XM1, XM0
movdqu XM0, 16(ARG1)
/* The 192-bit schedule produces 3 stored 16-byte words per 2 key-expansion
 * steps, hence the shuffling between consecutive stores below. */
movdqu XM1, XM2
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpermilps $0xff, XM0, XM3
vpxor XM3, XM2, XM2
movdqu XM2, 32(ARG1)
vshufps $0x4e, XM2, XM0, XM1
aeskeygenassist $0x02, XM2, XM0
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM0, XM0
vpxor XM1, XM0, XM0
movdqu XM0, 48(ARG1)
vshufps $0x4e, XM0, XM2, XM1
vpslldq $8, XM1, XM2
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpermilps $0xff, XM0, XM3
vpxor XM3, XM2, XM2
aeskeygenassist $0x04, XM2, XM3
vpermilps $0xff, XM3, XM3
vpsrldq $8, XM1, XM4
vpslldq $12, XM4, XM4
vpxor XM4, XM1, XM1
vpxor XM3, XM1, XM1
vshufps $0xee, XM1, XM2, XM2
movdqu XM2, 64(ARG1)
vshufps $0x4e, XM2, XM0, XM1
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM2, XM0
vpxor XM1, XM0, XM0
movdqu XM0, 80(ARG1)
vshufps $0x4e, XM0, XM2, XM1
aeskeygenassist $0x08, XM0, XM2
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM2, XM2
vpxor XM1, XM2, XM2
movdqu XM2, 96(ARG1)
vshufps $0x4e, XM2, XM0, XM1
vpslldq $8, XM1, XM0
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpermilps $0xff, XM2, XM3
vpxor XM3, XM0, XM0
aeskeygenassist $0x10, XM0, XM3
vpermilps $0xff, XM3, XM3
vpsrldq $8, XM1, XM4
vpslldq $12, XM4, XM4
vpxor XM4, XM1, XM1
vpxor XM3, XM1, XM1
vshufps $0xee, XM1, XM0, XM0
movdqu XM0, 112(ARG1)
vshufps $0x4e, XM0, XM2, XM1
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM0, XM2
vpxor XM1, XM2, XM2
movdqu XM2, 128(ARG1)
vshufps $0x4e, XM2, XM0, XM1
aeskeygenassist $0x20, XM2, XM0
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM0, XM0
vpxor XM1, XM0, XM0
movdqu XM0, 144(ARG1)
vshufps $0x4e, XM0, XM2, XM1
vpslldq $8, XM1, XM2
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpermilps $0xff, XM0, XM3
vpxor XM3, XM2, XM2
aeskeygenassist $0x40, XM2, XM3
vpermilps $0xff, XM3, XM3
vpsrldq $8, XM1, XM4
vpslldq $12, XM4, XM4
vpxor XM4, XM1, XM1
vpxor XM3, XM1, XM1
vshufps $0xee, XM1, XM2, XM2
movdqu XM2, 160(ARG1)
vshufps $0x4e, XM2, XM0, XM1
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM2, XM0
vpxor XM1, XM0, XM0
movdqu XM0, 176(ARG1)
vshufps $0x4e, XM0, XM2, XM1
aeskeygenassist $0x80, XM0, XM2
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM2, XM2
vpxor XM1, XM2, XM2
movdqu XM2, 192(ARG1)
/* Scrub key material from the xmm registers before returning. */
vpxor XM0, XM0, XM0
vpxor XM1, XM1, XM1
vpxor XM2, XM2, XM2
vpxor XM3, XM3, XM3
vpxor XM4, XM4, XM4
ret
.cfi_endproc
.size SetEncryptKey192, .-SetEncryptKey192
/**
* Function description: Sets the AES decryption key. Key length: 192 bits.
* Function prototype: void SetDecryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key);
* Input register:
* x0:Pointer to the output key structure.
* x1:Pointer to the input key.
* Change register: xmm0-xmm5
* Output register: None.
* Function/Macro Call: None.
*/
.globl SetDecryptKey192
.type SetDecryptKey192, @function
SetDecryptKey192:
.cfi_startproc
/* Same expansion as SetEncryptKey192, but round keys are stored in reverse
 * order (raw key at offset 192, final key at offset 0) and the intermediate
 * keys pass through aesimc for the equivalent inverse cipher. */
movl $12, 240(ARG1)
movdqu (ARG2), XM0
movdqu 8(ARG2), XM1
movdqu XM0, 192(ARG1)
vpxor XM4, XM4, XM4
vshufps $0x40, XM0, XM4, XM2
aeskeygenassist $0x01, XM1, XM0
vshufps $0xf0, XM0, XM4, XM0
vpslldq $0x04, XM2, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM0, XM0
vshufps $0xee, XM0, XM1, XM0
aesimc XM0, XM5
movdqu XM5, 176(ARG1)
movdqu XM1, XM2
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpermilps $0xff, XM0, XM3
vpxor XM3, XM2, XM2
aesimc XM2, XM5
movdqu XM5, 160(ARG1)
vshufps $0x4e, XM2, XM0, XM1
aeskeygenassist $0x02, XM2, XM0
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM0, XM0
vpxor XM1, XM0, XM0
aesimc XM0, XM5
movdqu XM5, 144(ARG1)
vshufps $0x4e, XM0, XM2, XM1
vpslldq $8, XM1, XM2
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpermilps $0xff, XM0, XM3
vpxor XM3, XM2, XM2
aeskeygenassist $0x04, XM2, XM3
vpermilps $0xff, XM3, XM3
vpsrldq $8, XM1, XM4
vpslldq $12, XM4, XM4
vpxor XM4, XM1, XM1
vpxor XM3, XM1, XM1
vshufps $0xee, XM1, XM2, XM2
aesimc XM2, XM5
movdqu XM5, 128(ARG1)
vshufps $0x4e, XM2, XM0, XM1
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM2, XM0
vpxor XM1, XM0, XM0
aesimc XM0, XM5
movdqu XM5,112(ARG1)
vshufps $0x4e, XM0, XM2, XM1
aeskeygenassist $0x08, XM0, XM2
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM2, XM2
vpxor XM1, XM2, XM2
aesimc XM2, XM5
movdqu XM5, 96(ARG1)
vshufps $0x4e, XM2, XM0, XM1
vpslldq $8, XM1, XM0
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpermilps $0xff, XM2, XM3
vpxor XM3, XM0, XM0
aeskeygenassist $0x10, XM0, XM3
vpermilps $0xff, XM3, XM3
vpsrldq $8, XM1, XM4
vpslldq $12, XM4, XM4
vpxor XM4, XM1, XM1
vpxor XM3, XM1, XM1
vshufps $0xee, XM1, XM0, XM0
aesimc XM0, XM5
movdqu XM5, 80(ARG1)
vshufps $0x4e, XM0, XM2, XM1
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM0, XM2
vpxor XM1, XM2, XM2
aesimc XM2, XM5
movdqu XM5, 64(ARG1)
vshufps $0x4e, XM2, XM0, XM1
aeskeygenassist $0x20, XM2, XM0
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM0, XM0
vpxor XM1, XM0, XM0
aesimc XM0, XM5
movdqu XM5, 48(ARG1)
vshufps $0x4e, XM0, XM2, XM1
vpslldq $8, XM1, XM2
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpermilps $0xff, XM0, XM3
vpxor XM3, XM2, XM2
aeskeygenassist $0x40, XM2, XM3
vpermilps $0xff, XM3, XM3
vpsrldq $8, XM1, XM4
vpslldq $12, XM4, XM4
vpxor XM4, XM1, XM1
vpxor XM3, XM1, XM1
vshufps $0xee, XM1, XM2, XM2
aesimc XM2, XM5
movdqu XM5, 32(ARG1)
vshufps $0x4e, XM2, XM0, XM1
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM2, XM0
vpxor XM1, XM0, XM0
aesimc XM0, XM5
movdqu XM5, 16(ARG1)
vshufps $0x4e, XM0, XM2, XM1
aeskeygenassist $0x80, XM0, XM2
KEY_EXPANSION_HELPER_192 XM1, XM3
vpermilps $0xff, XM2, XM2
vpxor XM1, XM2, XM2
/* Final round key stored raw (no aesimc) at offset 0. */
movdqu XM2,(ARG1)
/* Scrub key material from the xmm registers before returning. */
vpxor XM0, XM0, XM0
vpxor XM1, XM1, XM1
vpxor XM2, XM2, XM2
vpxor XM3, XM3, XM3
vpxor XM4, XM4, XM4
vpxor XM5, XM5, XM5
ret
.cfi_endproc
.size SetDecryptKey192, .-SetDecryptKey192
/**
 * Function description: Sets the AES encryption key. Key length: 256 bits.
 * Function prototype: void SetEncryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key);
 * Input register:
 *     rdi: Pointer to the output key structure.
 *     rsi: Pointer to the input key.
 * Change register: xmm0-xmm3.
 * Output register: None.
 * Function/Macro Call: None.
*/
.globl SetEncryptKey256
.type SetEncryptKey256, @function
SetEncryptKey256:
.cfi_startproc
/* ctx->rounds = 14 for AES-256. Round keys 0 and 1 are the raw 256-bit key;
 * each subsequent pair is derived with aeskeygenassist: the $0xff broadcast
 * (SubWord+RotWord+rcon) for even keys, the $0xAA broadcast (SubWord only)
 * for odd keys. */
movl $14, 240(ARG1)
movdqu (ARG2), XM0
movdqu 16(ARG2), XM1
movdqu XM0, (ARG1)
movdqu XM1, 16(ARG1)
aeskeygenassist $0x01, XM1, XM2
vpermilps $0xff, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
movdqu XM2, 32(ARG1)
aeskeygenassist $0x01, XM2, XM0
vpermilps $0xAA, XM0, XM0
vpslldq $4, XM1, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpxor XM1, XM0, XM0
movdqu XM0, 48(ARG1)
/* key pair 2 (rcon 0x02) */
aeskeygenassist $0x02, XM0, XM1
vpermilps $0xff, XM1, XM1
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM1, XM1
movdqu XM1, 64(ARG1)
aeskeygenassist $0x02, XM1, XM2
vpermilps $0xAA, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
movdqu XM2, 80(ARG1)
/* key pair 3 (rcon 0x04) */
aeskeygenassist $0x04, XM2, XM0
vpermilps $0xff, XM0, XM0
vpslldq $4, XM1, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpxor XM1, XM0, XM0
movdqu XM0, 96(ARG1)
aeskeygenassist $0x04, XM0, XM1
vpermilps $0xAA, XM1, XM1
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM1, XM1
movdqu XM1, 112(ARG1)
/* key pair 4 (rcon 0x08) */
aeskeygenassist $0x08, XM1, XM2
vpermilps $0xff, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
movdqu XM2, 128(ARG1)
aeskeygenassist $0x08, XM2, XM0
vpermilps $0xAA, XM0, XM0
vpslldq $4, XM1, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpxor XM1, XM0, XM0
movdqu XM0, 144(ARG1)
/* key pair 5 (rcon 0x10) */
aeskeygenassist $0x10, XM0, XM1
vpermilps $0xff, XM1, XM1
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM1, XM1
movdqu XM1, 160(ARG1)
aeskeygenassist $0x10, XM1, XM2
vpermilps $0xAA, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
movdqu XM2, 176(ARG1)
/* key pair 6 (rcon 0x20) */
aeskeygenassist $0x20, XM2, XM0
vpermilps $0xff, XM0, XM0
vpslldq $4, XM1, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpxor XM1, XM0, XM0
movdqu XM0, 192(ARG1)
aeskeygenassist $0x20, XM0, XM1
vpermilps $0xAA, XM1, XM1
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM1, XM1
movdqu XM1, 208(ARG1)
/* final key (rcon 0x40) */
aeskeygenassist $0x40, XM1, XM2
vpermilps $0xff, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
movdqu XM2, 224(ARG1)
/* Scrub key material from the xmm registers before returning. */
vpxor XM0, XM0, XM0
vpxor XM1, XM1, XM1
vpxor XM2, XM2, XM2
vpxor XM3, XM3, XM3
ret
.cfi_endproc
.size SetEncryptKey256, .-SetEncryptKey256
/**
 * Function description: Sets the AES decryption key. Key length: 256 bits.
 * Function prototype: void SetDecryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key);
 * Input register:
 *     rdi: Pointer to the output key structure.
 *     rsi: Pointer to the input key.
 * Change register: xmm0-xmm4.
 * Output register: None.
 * Function/Macro Call: None.
*/
.globl SetDecryptKey256
.type SetDecryptKey256, @function
SetDecryptKey256:
.cfi_startproc
/* Same expansion as SetEncryptKey256, but round keys are stored in reverse
 * order (first raw key at offset 224, final key at offset 0) and intermediate
 * keys pass through aesimc for the equivalent inverse cipher. */
movl $14, 240(ARG1)
movdqu (ARG2), XM0
movdqu 16(ARG2), XM1
movdqu XM0, 224(ARG1)
aesimc XM1, XM4
movdqu XM4, 208(ARG1)
aeskeygenassist $0x01, XM1, XM2
vpermilps $0xff, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
aesimc XM2, XM4
movdqu XM4, 192(ARG1)
aeskeygenassist $0x01, XM2, XM0
vpermilps $0xAA, XM0, XM0
vpslldq $4, XM1, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpxor XM1, XM0, XM0
aesimc XM0, XM4
movdqu XM4, 176(ARG1)
/* key pair 2 (rcon 0x02) */
aeskeygenassist $0x02, XM0, XM1
vpermilps $0xff, XM1, XM1
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM1, XM1
aesimc XM1, XM4
movdqu XM4, 160(ARG1)
aeskeygenassist $0x02, XM1, XM2
vpermilps $0xAA, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
aesimc XM2, XM4
movdqu XM4, 144(ARG1)
/* key pair 3 (rcon 0x04) */
aeskeygenassist $0x04, XM2, XM0
vpermilps $0xff, XM0, XM0
vpslldq $4, XM1, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpxor XM1, XM0, XM0
aesimc XM0, XM4
movdqu XM4, 128(ARG1)
aeskeygenassist $0x04, XM0, XM1
vpermilps $0xAA, XM1, XM1
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM1, XM1
aesimc XM1, XM4
movdqu XM4, 112(ARG1)
/* key pair 4 (rcon 0x08) */
aeskeygenassist $0x08, XM1, XM2
vpermilps $0xff, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
aesimc XM2, XM4
movdqu XM4, 96(ARG1)
aeskeygenassist $0x08, XM2, XM0
vpermilps $0xAA, XM0, XM0
vpslldq $4, XM1, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpxor XM1, XM0, XM0
aesimc XM0, XM4
movdqu XM4, 80(ARG1)
/* key pair 5 (rcon 0x10) */
aeskeygenassist $0x10, XM0, XM1
vpermilps $0xff, XM1, XM1
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM1, XM1
aesimc XM1, XM4
movdqu XM4, 64(ARG1)
aeskeygenassist $0x10, XM1, XM2
vpermilps $0xAA, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
aesimc XM2, XM4
movdqu XM4, 48(ARG1)
/* key pair 6 (rcon 0x20) */
aeskeygenassist $0x20, XM2, XM0
vpermilps $0xff, XM0, XM0
vpslldq $4, XM1, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpslldq $4, XM3, XM3
vpxor XM3, XM1, XM1
vpxor XM1, XM0, XM0
aesimc XM0, XM4
movdqu XM4, 32(ARG1)
aeskeygenassist $0x20, XM0, XM1
vpermilps $0xAA, XM1, XM1
vpslldq $4, XM2, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpslldq $4, XM3, XM3
vpxor XM3, XM2, XM2
vpxor XM2, XM1, XM1
aesimc XM1, XM4
movdqu XM4, 16(ARG1)
/* final key (rcon 0x40), stored raw (no aesimc) at offset 0 */
aeskeygenassist $0x40, XM1, XM2
vpermilps $0xff, XM2, XM2
vpslldq $4, XM0, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpslldq $4, XM3, XM3
vpxor XM3, XM0, XM0
vpxor XM0, XM2, XM2
movdqu XM2, (ARG1)
/* Scrub key material from the xmm registers before returning. */
vpxor XM0, XM0, XM0
vpxor XM1, XM1, XM1
vpxor XM2, XM2, XM2
vpxor XM3, XM3, XM3
vpxor XM4, XM4, XM4
ret
.cfi_endproc
.size SetDecryptKey256, .-SetDecryptKey256
/**
* Function description: This API is used to set the AES encryption assembly acceleration.
* Function prototype: int32_t CRYPT_AES_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
* Input register:
* x0:Pointer to the input key structure.
* x1:Points to the 128-bit input data.
* x2:Points to the 128-bit output data.
* x3:Indicates the length of a data block, that is, 16 bytes.
* Change register: xmm0-xmm1.
* Output register: eax.
* Function/Macro Call: None.
*/
.globl CRYPT_AES_Encrypt
.type CRYPT_AES_Encrypt, @function
CRYPT_AES_Encrypt:
.cfi_startproc
.set ROUNDS,%eax
/* Encrypt one 16-byte block: whitening XOR with round key 0, nine unrolled
 * middle rounds common to all key sizes, then branch on ctx->rounds (offset
 * 240: 10/12/14) for the remaining rounds and the aesenclast. */
movdqu (ARG2), XM0
movl 240(ARG1),ROUNDS
vpxor (ARG1), XM0, XM0
movdqu 16(ARG1), XM1
aesenc XM1, XM0
movdqu 32(ARG1), XM1
aesenc XM1, XM0
movdqu 48(ARG1), XM1
aesenc XM1, XM0
movdqu 64(ARG1), XM1
aesenc XM1, XM0
movdqu 80(ARG1), XM1
aesenc XM1, XM0
movdqu 96(ARG1), XM1
aesenc XM1, XM0
movdqu 112(ARG1), XM1
aesenc XM1, XM0
movdqu 128(ARG1), XM1
aesenc XM1, XM0
movdqu 144(ARG1), XM1
aesenc XM1, XM0
cmpl $10,ROUNDS
je .Laesenc_128
movdqu 160(ARG1), XM1
aesenc XM1, XM0
movdqu 176(ARG1), XM1
aesenc XM1, XM0
cmpl $12,ROUNDS
je .Laesenc_192
movdqu 192(ARG1), XM1
aesenc XM1, XM0
movdqu 208(ARG1), XM1
aesenc XM1, XM0
cmpl $14,ROUNDS
je .Laesenc_256
/* NOTE(review): falling through here (rounds not 10/12/14) takes the 128-bit
 * path; presumably unreachable for keys set by this module — confirm. */
.Laesenc_128:
movdqu 160(ARG1), XM1
aesenclast XM1, XM0
jmp .Laesenc_end
.Laesenc_192:
movdqu 192(ARG1), XM1
aesenclast XM1, XM0
jmp .Laesenc_end
.Laesenc_256:
movdqu 224(ARG1), XM1
aesenclast XM1, XM0
.Laesenc_end:
/* Store the ciphertext and scrub key/state material; return CRYPT_SUCCESS (0). */
vpxor XM1, XM1, XM1
movdqu XM0,(ARG3)
vpxor XM0, XM0, XM0
movl $0,RET
ret
.cfi_endproc
.size CRYPT_AES_Encrypt, .-CRYPT_AES_Encrypt
/**
* Function description: AES decryption and assembly acceleration API.
* Function prototype: int32_t CRYPT_AES_Decrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
* Input register:
* x0:Pointer to the input key structure.
* x1:Points to the 128-bit input data.
* x2:Points to the 128-bit output data.
* x3:Indicates the length of a data block, that is, 16 bytes.
* Change register: xmm0-xmm1.
* Output register: eax.
* Function/Macro Call: None.
*/
.globl CRYPT_AES_Decrypt
.type CRYPT_AES_Decrypt, @function
CRYPT_AES_Decrypt:
.cfi_startproc
.set ROUNDS,%eax
/* Decrypt one 16-byte block; mirror of CRYPT_AES_Encrypt over the reversed
 * (equivalent inverse cipher) key schedule laid down by SetDecryptKey*.
 * Fix: branch targets now use the '.L' local-label prefix (was .aesdec_*),
 * matching CRYPT_AES_Encrypt and keeping them out of the symbol table;
 * runtime behavior is unchanged. */
movdqu (ARG2), XM0
movl 240(ARG1),ROUNDS
vpxor (ARG1), XM0, XM0
movdqu 16(ARG1), XM1
aesdec XM1, XM0
movdqu 32(ARG1), XM1
aesdec XM1, XM0
movdqu 48(ARG1), XM1
aesdec XM1, XM0
movdqu 64(ARG1), XM1
aesdec XM1, XM0
movdqu 80(ARG1), XM1
aesdec XM1, XM0
movdqu 96(ARG1), XM1
aesdec XM1, XM0
movdqu 112(ARG1), XM1
aesdec XM1, XM0
movdqu 128(ARG1), XM1
aesdec XM1, XM0
movdqu 144(ARG1), XM1
aesdec XM1, XM0
cmpl $10,ROUNDS
je .Laesdec_128
movdqu 160(ARG1), XM1
aesdec XM1, XM0
movdqu 176(ARG1), XM1
aesdec XM1, XM0
cmpl $12,ROUNDS
je .Laesdec_192
movdqu 192(ARG1), XM1
aesdec XM1, XM0
movdqu 208(ARG1), XM1
aesdec XM1, XM0
cmpl $14,ROUNDS
je .Laesdec_256
.Laesdec_128:
movdqu 160(ARG1), XM1
aesdeclast XM1, XM0
jmp .Laesdec_end
.Laesdec_192:
movdqu 192(ARG1), XM1
aesdeclast XM1, XM0
jmp .Laesdec_end
.Laesdec_256:
movdqu 224(ARG1), XM1
aesdeclast XM1, XM0
.Laesdec_end:
/* Store the plaintext and scrub key/state material; return CRYPT_SUCCESS (0). */
vpxor XM1, XM1, XM1
movdqu XM0,(ARG3)
vpxor XM0, XM0, XM0
movl $0,RET
ret
.cfi_endproc
.size CRYPT_AES_Decrypt, .-CRYPT_AES_Decrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_x86_64.S | Motorola 68K Assembly | unknown | 25,400 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_XTS)
#include "crypt_aes_macro_armv8.s"
#include "crypt_arm.h"
.file "crypt_aes_xts_armv8.S"
.text
.arch armv8-a+crypto
KEY .req x0
IN .req x1
OUT .req x2
LEN .req x3
TWEAK .req x4
TMPOUT .req x17
WP .req w11
WC .req w12
KTMP .req x5
LTMP .req x6
TAILNUM .req x8
POS .req x16
ROUNDS .req w7
XROUNDS .req x7
TROUNDS .req w15
WTMP0 .req w9
WTMP1 .req w10
WTMP2 .req w11
WTMP3 .req w12
XTMP1 .req x10
XTMP2 .req x11
TWX0 .req x13
TWX1 .req x14
TWW1 .req w14
BLK0 .req v0
BLK1 .req v1
BLK2 .req v2
BLK3 .req v3
BLK4 .req v4
IN0 .req v5
IN1 .req v6
IN2 .req v7
IN3 .req v30
IN4 .req v31
TWK0 .req v8
TWK1 .req v9
TWK2 .req v10
TWK3 .req v11
TWK4 .req v12
TWKD00 .req d8
TWKD10 .req d9
TWKD20 .req d10
TWKD30 .req d11
TWKD40 .req d12
#define TWKD01 v8.d[1]
#define TWKD11 v9.d[1]
#define TWKD21 v10.d[1]
#define TWKD31 v11.d[1]
#define TWKD41 v12.d[1]
RDK0 .req v16
RDK1 .req v17
RDK2 .req v18
RDK3 .req v19
RDK4 .req v20
RDK5 .req v21
RDK6 .req v22
RDK7 .req v23
RDK8 .req v24
TMP0 .req v25
TMP1 .req v26
TMP2 .req v27
TMP3 .req v28
TMP4 .req v29
// Move a 128-bit tweak held in two GP registers into the low/high 64-bit
// lanes of a vector register pair via fmov.
// Fix: removed the trailing "; \" from the last macro line — a line
// continuation there splices the NEXT source line into this #define,
// corrupting both this macro and the .macro definition that follows.
#define MOV_REG_TO_VEC(SRC0, SRC1, DES0, DES1) \
fmov DES0,SRC0 ; \
fmov DES1,SRC1
// Advance the XTS tweak: multiply the 128-bit tweak (\twkl = low 64 bits,
// \twkh = high 64 bits) by x in GF(2^128), reducing with the constant 0x87
// held in WTMP0, then mirror the result into the vector lanes \twkd0/\twkd1.
.macro NextTweak twkl, twkh, twkd0, twkd1
asr XTMP2,\twkh,#63 // XTMP2 = all-ones if the tweak's top bit is set, else 0
extr \twkh,\twkh,\twkl,#63 // high half <<= 1, pulling in the top bit of the low half
and WTMP1,WTMP0,WTMP2 // XTMP1 = 0x87 on carry-out, else 0 (32-bit write zeroes upper bits)
eor \twkl,XTMP1,\twkl,lsl#1 // low half <<= 1, folding the reduction constant back in
fmov \twkd0,\twkl // must set lower bits of 'q' register first:
fmov \twkd1,\twkh // setting lower bits via the 'd' register clears the higher bits
.endm
// One AES round on block \d0 with round key \rk: the round instruction
// aes\en (aese or aesd) followed by the mix step aes\mc (aesmc or aesimc).
.macro AesCrypt1x en, mc, d0, rk
aes\en \d0\().16b, \rk\().16b
aes\mc \d0\().16b, \d0\().16b
.endm
// One AES encryption round (AESE + AESMC) on \d0 with round key \rk.
.macro AesEncrypt1x d0, rk
AesCrypt1x e, mc, \d0, \rk
.endm
// One AES decryption round (AESD + AESIMC) on \d0 with round key \rk.
.macro AesDecrypt1x d0, rk
AesCrypt1x d, imc, \d0, \rk
.endm
/**
* int32_t CRYPT_AES_XTS_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len, const uint8_t *tweak);
*/
.globl CRYPT_AES_XTS_Encrypt
.type CRYPT_AES_XTS_Encrypt, %function
.align 4
// AES-XTS encryption, AArch64 Crypto Extensions.
// x0=key schedule, x1=in, x2=out, x3=len, x4=tweak (read and written back).
// Processes 5/3/2/1 blocks per pass, then handles a partial final block
// with ciphertext stealing. Callee-saved d8-d15 hold live tweaks, so they
// are spilled to the stack first.
CRYPT_AES_XTS_Encrypt:
AARCH64_PACIASP
stp x29, x30, [sp,#-80]!
add x29, sp, #0
stp d8, d9, [sp,#16]
stp d10, d11, [sp,#32]
stp d12, d13, [sp,#48]
stp d14, d15, [sp,#64]
ld1 {TWK0.16b}, [TWEAK]
and TAILNUM, LEN, #0xF // get tail num, LEN % 16
and LTMP, LEN, #-16 // LTMP = byte count rounded down to whole blocks
mov WTMP0,0x87 // GF(2^128) reduction constant for the tweak update
ldr ROUNDS,[KEY,#240] // round count lives at offset 240 of the key schedule
fmov TWX0,TWKD00 // keep the current tweak in GP regs for NextTweak
fmov TWX1,TWKD01
sub ROUNDS,ROUNDS,#6 // preload last 7 round keys into RDK2..RDK8
add KTMP,KEY,XROUNDS,lsl#4
ld1 {RDK2.4s,RDK3.4s},[KTMP],#32
ld1 {RDK4.4s,RDK5.4s},[KTMP],#32
ld1 {RDK6.4s,RDK7.4s},[KTMP],#32
ld1 {RDK8.4s},[KTMP]
// Dispatch on the number of whole blocks remaining.
.Lxts_aesenc_start:
cmp LTMP, #80
b.ge .Lxts_enc_proc_5_blks
cmp LTMP, #48
b.ge .Lxts_enc_proc_3_blks
cmp LTMP, #32
b.eq .Lxts_enc_proc_2_blks
cmp LTMP, #16
b.eq .Lxts_enc_proc_1blk
// Ciphertext stealing: swap tail bytes with the end of the previous
// ciphertext block, then re-encrypt that rebuilt block in place.
.Lxtx_tail_blk:
fmov TWX0,TWKD00 // reset already computed tweak
fmov TWX1,TWKD01
cbz TAILNUM,.Lxts_aesenc_finish
// prepare encrypt tail block
sub TMPOUT,OUT,#16
.Lxtx_tail_blk_loop:
subs TAILNUM,TAILNUM,1
ldrb WC,[TMPOUT,TAILNUM] // byte of previous ciphertext block
ldrb WP,[IN,TAILNUM] // tail plaintext byte
strb WC,[OUT,TAILNUM] // emit stolen ciphertext byte
strb WP,[TMPOUT,TAILNUM] // build the block to be re-encrypted
b.gt .Lxtx_tail_blk_loop
ld1 {BLK0.16b}, [TMPOUT]
mov LTMP,#16
mov OUT,TMPOUT
b .Lxts_enc_proc_1blk_loaded
cbz LTMP,.Lxts_aesenc_finish // NOTE(review): unreachable after the unconditional branch above
// Single-block path.
.Lxts_enc_proc_1blk:
ld1 {BLK0.16b},[IN],#16
.Lxts_enc_proc_1blk_loaded:
eor BLK0.16b,BLK0.16b,TWK0.16b // pre-whiten with the tweak
mov KTMP, KEY
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
sub TROUNDS,ROUNDS,#2
.Lxts_rounds_1blks:
AesEncrypt1x BLK0,RDK0
ld1 {RDK0.4s},[KTMP],#16
subs TROUNDS,TROUNDS,#2
AesEncrypt1x BLK0,RDK1
ld1 {RDK1.4s},[KTMP],#16
b.gt .Lxts_rounds_1blks
AesEncrypt1x BLK0,RDK0
AesEncrypt1x BLK0,RDK1
// last 7 rounds
AesEncrypt1x BLK0,RDK2
AesEncrypt1x BLK0,RDK3
AesEncrypt1x BLK0,RDK4
AesEncrypt1x BLK0,RDK5
AesEncrypt1x BLK0,RDK6
aese BLK0.16b,RDK7.16b // final round
eor BLK0.16b,BLK0.16b,RDK8.16b
eor BLK0.16b,BLK0.16b,TWK0.16b // post-whiten with the same tweak
st1 {BLK0.16b}, [OUT], #16
NextTweak TWX0,TWX1,TWKD00,TWKD01
subs LTMP,LTMP,#16
b.hs .Lxts_aesenc_start
// Two-block path.
.Lxts_enc_proc_2_blks:
ld1 {BLK0.16b, BLK1.16b}, [IN], #32
mov KTMP, KEY
NextTweak TWX0,TWX1,TWKD10,TWKD11
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
sub TROUNDS,ROUNDS,#2
eor BLK0.16b, BLK0.16b, TWK0.16b
eor BLK1.16b, BLK1.16b, TWK1.16b
.Lxts_rounds_2blks:
AesEncrypt1x BLK0,RDK0
AesEncrypt1x BLK1,RDK0
ld1 {RDK0.4s},[KTMP],#16
subs TROUNDS,TROUNDS,#2
AesEncrypt1x BLK0,RDK1
AesEncrypt1x BLK1,RDK1
ld1 {RDK1.4s},[KTMP],#16
b.gt .Lxts_rounds_2blks
AesEncrypt1x BLK0,RDK0
AesEncrypt1x BLK1,RDK0
AesEncrypt1x BLK0,RDK1
AesEncrypt1x BLK1,RDK1
// last 7 rounds
AesEncrypt1x BLK0,RDK2
AesEncrypt1x BLK1,RDK2
AesEncrypt1x BLK0,RDK3
AesEncrypt1x BLK1,RDK3
AesEncrypt1x BLK0,RDK4
AesEncrypt1x BLK1,RDK4
AesEncrypt1x BLK0,RDK5
AesEncrypt1x BLK1,RDK5
AesEncrypt1x BLK0,RDK6
AesEncrypt1x BLK1,RDK6
eor TWK0.16b,TWK0.16b,RDK8.16b // fold last round key into the tweaks so the
eor TWK1.16b,TWK1.16b,RDK8.16b // final eor does whitening + tweak in one op
aese BLK0.16b,RDK7.16b // final round
aese BLK1.16b,RDK7.16b
eor BLK0.16b,BLK0.16b,TWK0.16b
eor BLK1.16b,BLK1.16b,TWK1.16b
st1 {BLK0.16b, BLK1.16b}, [OUT], #32
NextTweak TWX0,TWX1,TWKD00,TWKD01
subs LTMP,LTMP,#32
b.hs .Lxts_aesenc_start
// Three-block path.
.Lxts_enc_proc_3_blks:
ld1 {BLK0.16b}, [IN], #16 // first block
NextTweak TWX0,TWX1,TWKD10,TWKD11
eor BLK0.16b,BLK0.16b,TWK0.16b
ld1 {BLK1.16b}, [IN], #16 // second block
NextTweak TWX0,TWX1,TWKD20,TWKD21
eor BLK1.16b,BLK1.16b,TWK1.16b
ld1 {BLK2.16b}, [IN], #16 // third block
eor BLK2.16b,BLK2.16b,TWK2.16b
mov KTMP, KEY
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
sub TROUNDS,ROUNDS,#2
.Lxts_rounds_3blks:
AesEncrypt1x BLK0,RDK0
AesEncrypt1x BLK1,RDK0
AesEncrypt1x BLK2,RDK0
ld1 {RDK0.4s},[KTMP],#16
subs TROUNDS,TROUNDS,#2
AesEncrypt1x BLK0,RDK1
AesEncrypt1x BLK1,RDK1
AesEncrypt1x BLK2,RDK1
ld1 {RDK1.4s},[KTMP],#16
b.gt .Lxts_rounds_3blks
AesEncrypt1x BLK0,RDK0
AesEncrypt1x BLK1,RDK0
AesEncrypt1x BLK2,RDK0
AesEncrypt1x BLK0,RDK1
AesEncrypt1x BLK1,RDK1
AesEncrypt1x BLK2,RDK1
// last 7 rounds
AesEncrypt1x BLK0,RDK2
AesEncrypt1x BLK1,RDK2
AesEncrypt1x BLK2,RDK2
AesEncrypt1x BLK0,RDK3
AesEncrypt1x BLK1,RDK3
AesEncrypt1x BLK2,RDK3
AesEncrypt1x BLK0,RDK4
AesEncrypt1x BLK1,RDK4
AesEncrypt1x BLK2,RDK4
AesEncrypt1x BLK0,RDK5
AesEncrypt1x BLK1,RDK5
AesEncrypt1x BLK2,RDK5
AesEncrypt1x BLK0,RDK6
AesEncrypt1x BLK1,RDK6
AesEncrypt1x BLK2,RDK6
eor TWK0.16b,TWK0.16b,RDK8.16b
eor TWK1.16b,TWK1.16b,RDK8.16b
eor TWK2.16b,TWK2.16b,RDK8.16b
aese BLK0.16b,RDK7.16b
aese BLK1.16b,RDK7.16b
aese BLK2.16b,RDK7.16b
eor BLK0.16b,BLK0.16b,TWK0.16b
eor BLK1.16b,BLK1.16b,TWK1.16b
eor BLK2.16b,BLK2.16b,TWK2.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b}, [OUT], #48
NextTweak TWX0,TWX1,TWKD00,TWKD01
subs LTMP,LTMP,#48
b.hs .Lxts_aesenc_start
.align 4
// Five-block main loop: software-pipelined — next iteration's tweaks and
// input loads are interleaved with the tail rounds of the current one.
.Lxts_enc_proc_5_blks:
ld1 {BLK0.16b}, [IN], #16 // first block
NextTweak TWX0,TWX1,TWKD10,TWKD11
eor BLK0.16b,BLK0.16b,TWK0.16b
ld1 {BLK1.16b}, [IN], #16 // second block
NextTweak TWX0,TWX1,TWKD20,TWKD21
eor BLK1.16b,BLK1.16b,TWK1.16b
sub LTMP,LTMP,#32
ld1 {BLK2.16b}, [IN], #16 // third block
NextTweak TWX0,TWX1,TWKD30,TWKD31
eor BLK2.16b,BLK2.16b,TWK2.16b
ld1 {BLK3.16b}, [IN], #16 // fourth block
NextTweak TWX0,TWX1,TWKD40,TWKD41
eor BLK3.16b,BLK3.16b,TWK3.16b
sub LTMP,LTMP,#32
ld1 {BLK4.16b}, [IN], #16 // fifth block
eor BLK4.16b, BLK4.16b, TWK4.16b
sub LTMP,LTMP,#16
mov KTMP, KEY
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
sub TROUNDS,ROUNDS,#2
.align 4
.Lxts_rounds_5blks:
AesEncrypt1x BLK0,RDK0
AesEncrypt1x BLK1,RDK0
AesEncrypt1x BLK2,RDK0
AesEncrypt1x BLK3,RDK0
AesEncrypt1x BLK4,RDK0
ld1 {RDK0.4s},[KTMP],#16
subs TROUNDS,TROUNDS,#2
AesEncrypt1x BLK0,RDK1
AesEncrypt1x BLK1,RDK1
AesEncrypt1x BLK2,RDK1
AesEncrypt1x BLK3,RDK1
AesEncrypt1x BLK4,RDK1
ld1 {RDK1.4s},[KTMP],#16
b.gt .Lxts_rounds_5blks
AesEncrypt1x BLK0,RDK0
AesEncrypt1x BLK1,RDK0
AesEncrypt1x BLK2,RDK0
AesEncrypt1x BLK3,RDK0
AesEncrypt1x BLK4,RDK0
subs LTMP,LTMP,#80 // reserve the next 5-block group; flags used below
AesEncrypt1x BLK0,RDK1
AesEncrypt1x BLK1,RDK1
AesEncrypt1x BLK2,RDK1
AesEncrypt1x BLK3,RDK1
AesEncrypt1x BLK4,RDK1
// last 7 rounds
AesEncrypt1x BLK0,RDK2
AesEncrypt1x BLK1,RDK2
AesEncrypt1x BLK2,RDK2
AesEncrypt1x BLK3,RDK2
AesEncrypt1x BLK4,RDK2
csel POS,xzr,LTMP,gt // if < 5 blks remain, POS (<=0) rewinds IN so the
AesEncrypt1x BLK0,RDK3
AesEncrypt1x BLK1,RDK3
AesEncrypt1x BLK2,RDK3
AesEncrypt1x BLK3,RDK3
AesEncrypt1x BLK4,RDK3
add IN,IN,POS // speculative 5-block preload below stays in bounds
AesEncrypt1x BLK0,RDK4
AesEncrypt1x BLK1,RDK4
AesEncrypt1x BLK2,RDK4
AesEncrypt1x BLK3,RDK4
AesEncrypt1x BLK4,RDK4
AesEncrypt1x BLK0,RDK5
AesEncrypt1x BLK1,RDK5
AesEncrypt1x BLK2,RDK5
AesEncrypt1x BLK3,RDK5
AesEncrypt1x BLK4,RDK5
AesEncrypt1x BLK0,RDK6
AesEncrypt1x BLK1,RDK6
AesEncrypt1x BLK2,RDK6
AesEncrypt1x BLK3,RDK6
AesEncrypt1x BLK4,RDK6
eor TMP0.16b,TWK0.16b,RDK8.16b
aese BLK0.16b,RDK7.16b // final round
NextTweak TWX0,TWX1,TWKD00,TWKD01 // perform operations of next 5blks in advance
eor TMP1.16b,TWK1.16b,RDK8.16b
ld1 {IN0.16b}, [IN], #16
aese BLK1.16b,RDK7.16b
NextTweak TWX0,TWX1,TWKD10,TWKD11
eor TMP2.16b,TWK2.16b,RDK8.16b
ld1 {IN1.16b}, [IN], #16
aese BLK2.16b,RDK7.16b
NextTweak TWX0,TWX1,TWKD20,TWKD21
eor TMP3.16b,TWK3.16b,RDK8.16b
ld1 {IN2.16b}, [IN], #16
aese BLK3.16b,RDK7.16b
NextTweak TWX0,TWX1,TWKD30,TWKD31
eor TMP4.16b,TWK4.16b,RDK8.16b
ld1 {IN3.16b}, [IN], #16
aese BLK4.16b,RDK7.16b
NextTweak TWX0,TWX1,TWKD40,TWKD41
ld1 {IN4.16b}, [IN], #16
mov KTMP, KEY
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
eor TMP0.16b,TMP0.16b,BLK0.16b
eor BLK0.16b,IN0.16b,TWK0.16b // blk0 = in0 ^ twk0
eor TMP1.16b,TMP1.16b,BLK1.16b
eor BLK1.16b,IN1.16b,TWK1.16b
st1 {TMP0.16b}, [OUT], #16
eor TMP2.16b,TMP2.16b,BLK2.16b
eor BLK2.16b,IN2.16b,TWK2.16b
eor TMP3.16b,TMP3.16b,BLK3.16b
eor BLK3.16b,IN3.16b,TWK3.16b
st1 {TMP1.16b}, [OUT], #16
eor TMP4.16b,TMP4.16b,BLK4.16b
eor BLK4.16b,IN4.16b,TWK4.16b
st1 {TMP2.16b}, [OUT], #16
sub TROUNDS,ROUNDS,#2
st1 {TMP3.16b,TMP4.16b}, [OUT], #32
b.hs .Lxts_rounds_5blks
// Fewer than 5 blocks remain: the speculative 5-block load and tweak
// pre-computation already happened, so back out the unneeded work and
// re-route the preloaded data into the narrower paths.
add LTMP,LTMP,#80 // add 5 blocks length back if LTMP < 0
cbz LTMP,.Lxtx_tail_blk
cmp LTMP, #16
b.eq .Lxts_pre_last_1blks
cmp LTMP,#32
b.eq .Lxts_pre_last_2blks
cmp LTMP,#48
b.eq .Lxts_pre_last_3blks
cmp LTMP,#64
b.eq .Lxts_pre_last_4blks
.Lxts_pre_last_1blks:
eor IN0.16b,IN0.16b,IN4.16b // in0 = in0 ^ in4
eor BLK0.16b,BLK0.16b,IN0.16b // blk0 = in0 ^ twk0 ^ in0 ^ in4
fmov TWX0,TWKD00 // reset already computed tweak
fmov TWX1,TWKD01
b .Lxts_rounds_1blks
.Lxts_pre_last_2blks:
eor BLK0.16b,BLK0.16b,IN0.16b
eor BLK1.16b,BLK1.16b,IN1.16b
eor BLK0.16b,BLK0.16b,IN3.16b // in3 -> blk0
eor BLK1.16b,BLK1.16b,IN4.16b // in4 -> blk1
fmov TWX0,TWKD10 // reset already computed tweak
fmov TWX1,TWKD11
b .Lxts_rounds_2blks
.Lxts_pre_last_3blks:
eor BLK0.16b,BLK0.16b,IN0.16b
eor BLK1.16b,BLK1.16b,IN1.16b
eor BLK2.16b,BLK2.16b,IN2.16b
eor BLK0.16b,BLK0.16b,IN2.16b // in2 -> blk0
eor BLK1.16b,BLK1.16b,IN3.16b // in3 -> blk1
eor BLK2.16b,BLK2.16b,IN4.16b // in4 -> blk2
fmov TWX0,TWKD20 // reset already computed tweak
fmov TWX1,TWKD21
b .Lxts_rounds_3blks
.Lxts_pre_last_4blks:
eor BLK0.16b,BLK0.16b,IN0.16b
eor BLK1.16b,BLK1.16b,IN1.16b
eor BLK2.16b,BLK2.16b,IN2.16b
eor BLK3.16b,BLK3.16b,IN3.16b
sub IN,IN,#16 // have loaded 4blks, using 3blks to process, so step back 1blk here
eor BLK0.16b,BLK0.16b,IN1.16b // in1 -> blk0
eor BLK1.16b,BLK1.16b,IN2.16b // in2 -> blk1
eor BLK2.16b,BLK2.16b,IN3.16b // in3 -> blk2
eor BLK3.16b,BLK3.16b,IN4.16b // in4 -> blk3
fmov TWX0,TWKD20 // reset already computed tweak
fmov TWX1,TWKD21
b .Lxts_rounds_3blks
// Write back the updated tweak, restore callee-saved regs, return 0.
.Lxts_aesenc_finish:
MOV_REG_TO_VEC(TWX0,TWX1,TWKD00,TWKD01)
st1 {TWK0.16b}, [TWEAK]
mov x0, #0 // return 0 (success)
ldp d14, d15, [sp,#64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldp x29, x30, [sp], #80
AARCH64_AUTIASP
ret
.size CRYPT_AES_XTS_Encrypt, .-CRYPT_AES_XTS_Encrypt
/**
* int32_t CRYPT_AES_XTS_Decrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len, const uint8_t *t);
*/
.globl CRYPT_AES_XTS_Decrypt
.type CRYPT_AES_XTS_Decrypt, %function
.align 4
// AES-XTS decryption, AArch64 Crypto Extensions.
// x0=key schedule, x1=in, x2=out, x3=len, x4=tweak (read and written back).
// Mirrors the encrypt path, except: when a partial tail exists, the LAST
// full ciphertext block is held back (decrypted with the final tweak) so
// ciphertext stealing can run against it afterwards with the earlier tweak.
CRYPT_AES_XTS_Decrypt:
AARCH64_PACIASP
stp x29, x30, [sp,#-80]!
add x29, sp, #0
stp d8, d9, [sp,#16]
stp d10, d11, [sp,#32]
stp d12, d13, [sp,#48]
stp d14, d15, [sp,#64]
ld1 {TWK0.16b}, [TWEAK]
and LTMP, LEN, #-16
ands TAILNUM, LEN, #0xF // get tail num, LEN % 16 (sets flags for csel below)
sub XTMP1,LTMP,#16 // preserve last and tail block
csel LTMP,XTMP1,LTMP,ne // if tailnum != 0, len -= 16
mov WTMP0,0x87 // GF(2^128) reduction constant
ldr ROUNDS,[KEY,#240]
fmov TWX0,TWKD00
fmov TWX1,TWKD01
sub ROUNDS,ROUNDS,#6 // preload last 7 round keys into RDK2..RDK8
add KTMP,KEY,XROUNDS,lsl#4
ld1 {RDK2.4s,RDK3.4s},[KTMP],#32
ld1 {RDK4.4s,RDK5.4s},[KTMP],#32
ld1 {RDK6.4s,RDK7.4s},[KTMP],#32
ld1 {RDK8.4s},[KTMP]
// Dispatch on the number of whole blocks remaining.
.Lxts_aesdec_start:
cmp LTMP, #80
b.ge .Lxts_dec_proc_5_blks
cmp LTMP, #48
b.ge .Lxts_dec_proc_3_blks
cmp LTMP, #32
b.eq .Lxts_dec_proc_2_blks
cmp LTMP, #16
b.eq .Lxts_dec_proc_1blk
cmp LTMP, #0
b.eq .Lxts_dec_last_secondblk
// Ciphertext stealing: swap tail bytes with the end of the block just
// produced, then decrypt the rebuilt block using the saved earlier tweak.
.Lxtx_dec_tail_blk:
fmov TWX0,TWKD00 // reset already computed tweak
fmov TWX1,TWKD01
cbz TAILNUM,.Lxts_aesdec_finish
// prepare decrypt tail block
sub TMPOUT,OUT,#16
.Lxtx_dec_tail_blk_loop:
subs TAILNUM,TAILNUM,1
ldrb WC,[TMPOUT,TAILNUM] // byte of just-written plaintext block
ldrb WP,[IN,TAILNUM] // tail ciphertext byte
strb WC,[OUT,TAILNUM] // emit stolen plaintext byte
strb WP,[TMPOUT,TAILNUM] // build the block to be re-decrypted
b.gt .Lxtx_dec_tail_blk_loop
ld1 {BLK0.16b}, [TMPOUT]
mov OUT,TMPOUT
mov TWK0.16b,TWK2.16b // load pre-tweak back
b .Lxts_dec_proc_1blk_loaded
cbz LTMP,.Lxts_aesdec_finish // NOTE(review): unreachable after the unconditional branch above
// Tail exists and only the held-back block is left: decrypt it with the
// NEXT tweak, keeping the current one in TWK2 for the stealing step.
.Lxts_dec_last_secondblk:
cbz TAILNUM,.Lxts_aesdec_finish
mov TWK2.16b,TWK0.16b // save last second tweak
NextTweak TWX0,TWX1,TWKD00,TWKD01
// Single-block path.
.Lxts_dec_proc_1blk:
ld1 {BLK0.16b}, [IN],#16
.Lxts_dec_proc_1blk_loaded:
mov KTMP, KEY
eor BLK0.16b,BLK0.16b,TWK0.16b // pre-whiten with the tweak
ld1 {RDK0.4s},[KTMP],#16
sub TROUNDS,ROUNDS,#2
ld1 {RDK1.4s},[KTMP],#16
.Lxts_dec_rounds_1blks:
AesDecrypt1x BLK0,RDK0
ld1 {RDK0.4s},[KTMP],#16
subs TROUNDS,TROUNDS,#2
AesDecrypt1x BLK0,RDK1
ld1 {RDK1.4s},[KTMP],#16
b.gt .Lxts_dec_rounds_1blks
AesDecrypt1x BLK0,RDK0
AesDecrypt1x BLK0,RDK1
// last 7 rounds
AesDecrypt1x BLK0,RDK2
AesDecrypt1x BLK0,RDK3
AesDecrypt1x BLK0,RDK4
AesDecrypt1x BLK0,RDK5
AesDecrypt1x BLK0,RDK6
aesd BLK0.16b,RDK7.16b // final round
eor BLK0.16b,BLK0.16b,RDK8.16b
eor BLK0.16b,BLK0.16b,TWK0.16b // post-whiten with the same tweak
st1 {BLK0.16b}, [OUT], #16
NextTweak TWX0,TWX1,TWKD00,TWKD01
subs LTMP,LTMP,#16
b.lt .Lxtx_dec_tail_blk
b.hs .Lxts_aesdec_start
// Two-block path.
.Lxts_dec_proc_2_blks:
ld1 {BLK0.16b, BLK1.16b}, [IN], #32
mov KTMP, KEY
NextTweak TWX0,TWX1,TWKD10,TWKD11
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
sub TROUNDS,ROUNDS,#2
eor BLK0.16b, BLK0.16b, TWK0.16b
eor BLK1.16b, BLK1.16b, TWK1.16b
.Lxts_dec_rounds_2blks:
AesDecrypt1x BLK0,RDK0
AesDecrypt1x BLK1,RDK0
ld1 {RDK0.4s},[KTMP],#16
subs TROUNDS,TROUNDS,#2
AesDecrypt1x BLK0,RDK1
AesDecrypt1x BLK1,RDK1
ld1 {RDK1.4s},[KTMP],#16
b.gt .Lxts_dec_rounds_2blks
AesDecrypt1x BLK0,RDK0
AesDecrypt1x BLK1,RDK0
AesDecrypt1x BLK0,RDK1
AesDecrypt1x BLK1,RDK1
// last 7 rounds
AesDecrypt1x BLK0,RDK2
AesDecrypt1x BLK1,RDK2
AesDecrypt1x BLK0,RDK3
AesDecrypt1x BLK1,RDK3
AesDecrypt1x BLK0,RDK4
AesDecrypt1x BLK1,RDK4
AesDecrypt1x BLK0,RDK5
AesDecrypt1x BLK1,RDK5
AesDecrypt1x BLK0,RDK6
AesDecrypt1x BLK1,RDK6
eor TWK0.16b,TWK0.16b,RDK8.16b // fold last round key into the tweaks so the
eor TWK1.16b,TWK1.16b,RDK8.16b // final eor does whitening + tweak in one op
aesd BLK0.16b,RDK7.16b // final round
aesd BLK1.16b,RDK7.16b
eor BLK0.16b,BLK0.16b,TWK0.16b
eor BLK1.16b,BLK1.16b,TWK1.16b
st1 {BLK0.16b, BLK1.16b}, [OUT], #32
NextTweak TWX0,TWX1,TWKD00,TWKD01
subs LTMP,LTMP,#32
b.hs .Lxts_aesdec_start
// Three-block path.
.Lxts_dec_proc_3_blks:
ld1 {BLK0.16b}, [IN], #16 // first block
NextTweak TWX0,TWX1,TWKD10,TWKD11
eor BLK0.16b,BLK0.16b,TWK0.16b
ld1 {BLK1.16b}, [IN], #16 // second block
NextTweak TWX0,TWX1,TWKD20,TWKD21
eor BLK1.16b,BLK1.16b,TWK1.16b
ld1 {BLK2.16b}, [IN], #16 // third block
eor BLK2.16b,BLK2.16b,TWK2.16b
mov KTMP, KEY
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
sub TROUNDS,ROUNDS,#2
.Lxts_dec_rounds_3blks:
AesDecrypt1x BLK0,RDK0
AesDecrypt1x BLK1,RDK0
AesDecrypt1x BLK2,RDK0
ld1 {RDK0.4s},[KTMP],#16
subs TROUNDS,TROUNDS,#2
AesDecrypt1x BLK0,RDK1
AesDecrypt1x BLK1,RDK1
AesDecrypt1x BLK2,RDK1
ld1 {RDK1.4s},[KTMP],#16
b.gt .Lxts_dec_rounds_3blks
AesDecrypt1x BLK0,RDK0
AesDecrypt1x BLK1,RDK0
AesDecrypt1x BLK2,RDK0
AesDecrypt1x BLK0,RDK1
AesDecrypt1x BLK1,RDK1
AesDecrypt1x BLK2,RDK1
// last 7 rounds
AesDecrypt1x BLK0,RDK2
AesDecrypt1x BLK1,RDK2
AesDecrypt1x BLK2,RDK2
AesDecrypt1x BLK0,RDK3
AesDecrypt1x BLK1,RDK3
AesDecrypt1x BLK2,RDK3
AesDecrypt1x BLK0,RDK4
AesDecrypt1x BLK1,RDK4
AesDecrypt1x BLK2,RDK4
AesDecrypt1x BLK0,RDK5
AesDecrypt1x BLK1,RDK5
AesDecrypt1x BLK2,RDK5
AesDecrypt1x BLK0,RDK6
AesDecrypt1x BLK1,RDK6
AesDecrypt1x BLK2,RDK6
eor TWK0.16b,TWK0.16b,RDK8.16b
eor TWK1.16b,TWK1.16b,RDK8.16b
eor TWK2.16b,TWK2.16b,RDK8.16b
aesd BLK0.16b,RDK7.16b
aesd BLK1.16b,RDK7.16b
aesd BLK2.16b,RDK7.16b
eor BLK0.16b,BLK0.16b,TWK0.16b
eor BLK1.16b,BLK1.16b,TWK1.16b
eor BLK2.16b,BLK2.16b,TWK2.16b
st1 {BLK0.16b, BLK1.16b, BLK2.16b}, [OUT], #48
NextTweak TWX0,TWX1,TWKD00,TWKD01
subs LTMP,LTMP,#48
b.hs .Lxts_aesdec_start
.align 4
// Five-block main loop, software-pipelined like the encrypt side.
.Lxts_dec_proc_5_blks:
ld1 {BLK0.16b}, [IN], #16 // first block
NextTweak TWX0,TWX1,TWKD10,TWKD11
eor BLK0.16b,BLK0.16b,TWK0.16b
ld1 {BLK1.16b}, [IN], #16 // second block
NextTweak TWX0,TWX1,TWKD20,TWKD21
eor BLK1.16b,BLK1.16b,TWK1.16b
sub LTMP,LTMP,#32
ld1 {BLK2.16b}, [IN], #16 // third block
NextTweak TWX0,TWX1,TWKD30,TWKD31
eor BLK2.16b,BLK2.16b,TWK2.16b
ld1 {BLK3.16b}, [IN], #16 // fourth block
NextTweak TWX0,TWX1,TWKD40,TWKD41
eor BLK3.16b,BLK3.16b,TWK3.16b
sub LTMP,LTMP,#32
ld1 {BLK4.16b}, [IN], #16 // fifth block
eor BLK4.16b, BLK4.16b, TWK4.16b
sub LTMP,LTMP,#16
mov KTMP, KEY
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
sub TROUNDS,ROUNDS,#2
.align 4
.Lxts_dec_rounds_5blks:
AesDecrypt1x BLK0,RDK0
AesDecrypt1x BLK1,RDK0
AesDecrypt1x BLK2,RDK0
AesDecrypt1x BLK3,RDK0
AesDecrypt1x BLK4,RDK0
ld1 {RDK0.4s},[KTMP],#16
subs TROUNDS,TROUNDS,#2
AesDecrypt1x BLK0,RDK1
AesDecrypt1x BLK1,RDK1
AesDecrypt1x BLK2,RDK1
AesDecrypt1x BLK3,RDK1
AesDecrypt1x BLK4,RDK1
ld1 {RDK1.4s},[KTMP],#16
b.gt .Lxts_dec_rounds_5blks
AesDecrypt1x BLK0,RDK0
AesDecrypt1x BLK1,RDK0
AesDecrypt1x BLK2,RDK0
AesDecrypt1x BLK3,RDK0
AesDecrypt1x BLK4,RDK0
subs LTMP,LTMP,#80 // reserve the next 5-block group; flags used below
AesDecrypt1x BLK0,RDK1
AesDecrypt1x BLK1,RDK1
AesDecrypt1x BLK2,RDK1
AesDecrypt1x BLK3,RDK1
AesDecrypt1x BLK4,RDK1
// last 7 rounds
AesDecrypt1x BLK0,RDK2
AesDecrypt1x BLK1,RDK2
AesDecrypt1x BLK2,RDK2
AesDecrypt1x BLK3,RDK2
AesDecrypt1x BLK4,RDK2
csel POS,xzr,LTMP,gt // if < 5 blks remain, POS (<=0) rewinds IN so the
AesDecrypt1x BLK0,RDK3
AesDecrypt1x BLK1,RDK3
AesDecrypt1x BLK2,RDK3
AesDecrypt1x BLK3,RDK3
AesDecrypt1x BLK4,RDK3
add IN,IN,POS // speculative 5-block preload below stays in bounds
AesDecrypt1x BLK0,RDK4
AesDecrypt1x BLK1,RDK4
AesDecrypt1x BLK2,RDK4
AesDecrypt1x BLK3,RDK4
AesDecrypt1x BLK4,RDK4
AesDecrypt1x BLK0,RDK5
AesDecrypt1x BLK1,RDK5
AesDecrypt1x BLK2,RDK5
AesDecrypt1x BLK3,RDK5
AesDecrypt1x BLK4,RDK5
AesDecrypt1x BLK0,RDK6
AesDecrypt1x BLK1,RDK6
AesDecrypt1x BLK2,RDK6
AesDecrypt1x BLK3,RDK6
AesDecrypt1x BLK4,RDK6
eor TMP0.16b,TWK0.16b,RDK8.16b
aesd BLK0.16b,RDK7.16b // final round
NextTweak TWX0,TWX1,TWKD00,TWKD01 // perform operations of next 5blks in advance
eor TMP1.16b,TWK1.16b,RDK8.16b
ld1 {IN0.16b}, [IN], #16
aesd BLK1.16b,RDK7.16b
NextTweak TWX0,TWX1,TWKD10,TWKD11
eor TMP2.16b,TWK2.16b,RDK8.16b
ld1 {IN1.16b}, [IN], #16
aesd BLK2.16b,RDK7.16b
NextTweak TWX0,TWX1,TWKD20,TWKD21
eor TMP3.16b,TWK3.16b,RDK8.16b
ld1 {IN2.16b}, [IN], #16
aesd BLK3.16b,RDK7.16b
NextTweak TWX0,TWX1,TWKD30,TWKD31
eor TMP4.16b,TWK4.16b,RDK8.16b
ld1 {IN3.16b}, [IN], #16
aesd BLK4.16b,RDK7.16b
NextTweak TWX0,TWX1,TWKD40,TWKD41
ld1 {IN4.16b}, [IN], #16
mov KTMP, KEY
ld1 {RDK0.4s,RDK1.4s},[KTMP],#32
eor TMP0.16b,TMP0.16b,BLK0.16b
eor BLK0.16b,IN0.16b,TWK0.16b // blk0 = in0 ^ twk0
eor TMP1.16b,TMP1.16b,BLK1.16b
eor BLK1.16b,IN1.16b,TWK1.16b
st1 {TMP0.16b}, [OUT], #16
eor TMP2.16b,TMP2.16b,BLK2.16b
eor BLK2.16b,IN2.16b,TWK2.16b
eor TMP3.16b,TMP3.16b,BLK3.16b
eor BLK3.16b,IN3.16b,TWK3.16b
st1 {TMP1.16b}, [OUT], #16
eor TMP4.16b,TMP4.16b,BLK4.16b
eor BLK4.16b,IN4.16b,TWK4.16b
st1 {TMP2.16b}, [OUT], #16
sub TROUNDS,ROUNDS,#2
st1 {TMP3.16b,TMP4.16b}, [OUT], #32
b.hs .Lxts_dec_rounds_5blks
// Fewer than 5 blocks remain: back out the speculative work and re-route
// the preloaded data into the narrower paths.
add LTMP,LTMP,#80 // add 5 blocks length back if LTMP < 0
cbz LTMP, .Lxts_dec_pre_last_secondblks
cmp LTMP, #16
b.eq .Lxts_dec_pre_last_1blks
cmp LTMP,#32
b.eq .Lxts_dec_pre_last_2blks
cmp LTMP,#48
b.eq .Lxts_dec_pre_last_3blks
cmp LTMP,#64
b.eq .Lxts_dec_pre_last_4blks
.Lxts_dec_pre_last_secondblks:
fmov TWX0,TWKD10 // reset already computed tweak
fmov TWX1,TWKD11
mov TWK2.16b, TWK0.16b // save the last second tweak
mov TWK0.16b, TWK1.16b // use the last tweak
b .Lxts_dec_proc_1blk
.Lxts_dec_pre_last_1blks:
eor IN0.16b,IN0.16b,IN4.16b // in0 = in0 ^ in4
eor BLK0.16b,BLK0.16b,IN0.16b // blk0 = in0 ^ twk0 ^ in0 ^ in4
fmov TWX0,TWKD00 // reset already computed tweak
fmov TWX1,TWKD01
b .Lxts_dec_rounds_1blks
.Lxts_dec_pre_last_2blks:
eor BLK0.16b,BLK0.16b,IN0.16b
eor BLK1.16b,BLK1.16b,IN1.16b
eor BLK0.16b,BLK0.16b,IN3.16b // in3 -> blk0
eor BLK1.16b,BLK1.16b,IN4.16b // in4 -> blk1
fmov TWX0,TWKD10 // reset already computed tweak
fmov TWX1,TWKD11
b .Lxts_dec_rounds_2blks
.Lxts_dec_pre_last_3blks:
eor BLK0.16b,BLK0.16b,IN0.16b
eor BLK1.16b,BLK1.16b,IN1.16b
eor BLK2.16b,BLK2.16b,IN2.16b
eor BLK0.16b,BLK0.16b,IN2.16b // in2 -> blk0
eor BLK1.16b,BLK1.16b,IN3.16b // in3 -> blk1
eor BLK2.16b,BLK2.16b,IN4.16b // in4 -> blk2
fmov TWX0,TWKD20 // reset already computed tweak
fmov TWX1,TWKD21
b .Lxts_dec_rounds_3blks
.Lxts_dec_pre_last_4blks:
eor BLK0.16b,BLK0.16b,IN0.16b
eor BLK1.16b,BLK1.16b,IN1.16b
eor BLK2.16b,BLK2.16b,IN2.16b
eor BLK3.16b,BLK3.16b,IN3.16b
sub IN,IN,#16 // have loaded 4blks, using 3blks to process, so step back 1blk here
eor BLK0.16b,BLK0.16b,IN1.16b // in1 -> blk0
eor BLK1.16b,BLK1.16b,IN2.16b // in2 -> blk1
eor BLK2.16b,BLK2.16b,IN3.16b // in3 -> blk2
eor BLK3.16b,BLK3.16b,IN4.16b // in4 -> blk3
fmov TWX0,TWKD20 // reset already computed tweak
fmov TWX1,TWKD21
b .Lxts_dec_rounds_3blks
// Write back the updated tweak, restore callee-saved regs, return 0.
.Lxts_aesdec_finish:
MOV_REG_TO_VEC(TWX0,TWX1,TWKD00,TWKD01)
st1 {TWK0.16b}, [TWEAK]
mov x0, #0 // return 0 (success)
ldp d14, d15, [sp,#64]
ldp d12, d13, [sp, #48]
ldp d10, d11, [sp, #32]
ldp d8, d9, [sp, #16]
ldp x29, x30, [sp], #80
AARCH64_AUTIASP
ret
.size CRYPT_AES_XTS_Decrypt, .-CRYPT_AES_XTS_Decrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_xts_armv8.S | Unix Assembly | unknown | 25,458 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_XTS)
#include "crypt_aes_macro_x86_64.s"
.file "crypt_aes_xts_x86_64.S"
.set KEY, %rdi
.set IN, %rsi
.set OUT, %rdx
.set LEN, %ecx
.set TWEAK, %r8
.set KTMP, %r9
.set LTMP, %r15d
.set TAILNUM,%r14d
.set TMPOUT,%r13
.set TMPIN,%r9
.set ROUNDS, %eax
.set RET, %eax
.set TROUNDS, %r10
.set ROUNDSQ,%rax
.set KEYEND,%r9
.set WTMP0, %ecx
.set WTMP1, %r10d
.set WTMP2, %r11d
.set XTMP0, %rcx
.set XTMP1, %r10
.set XTMP2, %r11
.set TWX0, %r13
.set TWX1, %r14
.set BLK0, %xmm8
.set BLK1, %xmm9
.set BLK2, %xmm10
.set BLK3, %xmm11
.set BLK4, %xmm12
.set BLK5, %xmm13
.set BLK6, %xmm14
.set TWEAK0, %xmm0
.set TWEAK1, %xmm1
.set TWEAK2, %xmm2
.set TWEAK3, %xmm3
.set TWEAK4, %xmm4
.set TWEAK5, %xmm5
.set TWEAK6, %xmm6
.set RDK, %xmm15
.set RDK1, %xmm7
.set TMPX, %xmm7
.set GFP, %xmm6
.set TWKTMP, %xmm14
// Multiply the 128-bit tweak \twkin by x in GF(2^128).
// \twktmp carries the tweak's sign/carry doublewords (prepared with
// vpshufd $0x5f by the callers) and is advanced alongside \twkin so
// repeated invocations yield consecutive tweaks; \gfp holds .Lgfp128.
.macro NextTweakCore gfp, twkin, twktmp, tmp
vmovdqa \twktmp,\tmp
vpaddd \twktmp,\twktmp,\twktmp // doubleword << 1
vpsrad $31,\tmp,\tmp // ASR doubleword: broadcast the carry-out bits
vpaddq \twkin,\twkin,\twkin // quadword << 1
vpand \gfp,\tmp,\tmp // and 0x10000000000000087
vpxor \tmp,\twkin,\twkin // fold the reduction back in
.endm
// Advance \twkin to the next tweak (NextTweakCore) and also copy the
// result into \twkout, leaving \twkin ready for further advancing.
.macro NextTweak gfp, twkin, twkout, twktmp, tmp
NextTweakCore \gfp,\twkin,\twktmp,\tmp
vmovdqa \twkin,\twkout
.endm
// Push callee-saved GP registers (mirror of LOAD_STACK).
// NOTE(review): pushing %rsp here and popping it in LOAD_STACK is unusual;
// not invoked by the visible XTS routines, which save registers inline —
// verify intent before reuse.
.macro SAVE_STACK
push %rbx
push %rbp
push %rsp
push %r12
push %r13
push %r14
push %r15
.endm
// Pop registers pushed by SAVE_STACK, in reverse order.
.macro LOAD_STACK
pop %r15
pop %r14
pop %r13
pop %r12
pop %rsp
pop %rbp
pop %rbx
.endm
.data
.align 64
// modulus of Galois Field x^128+x^7+x^2+x+1 => 0x87(0b10000111);
// laid out as doublewords {0x87,0,1,0} for the vpand in NextTweakCore
.Lgfp128:
.long 0x87,0,1,0
.text
/**
* Function description: Sets the AES encryption assembly acceleration API in XTS mode.
* Function prototype: int32_t CRYPT_AES_XTS_Encrypt(const CRYPT_AES_Key *ctx,
* const uint8_t *in, uint8_t *out, uint32_t len);
* Input register:
* x0: Pointer to the input key structure.
* x1: Points to the 128-bit input data.
* x2: Points to the 128-bit output data.
* x3: Indicates the length of a data block, that is, 16 bytes.
* Change register: xmm1,xmm3,xmm4,xmm5,xmm6,xmm10,xmm11,xmm12,xmm13.
* Output register: eax.
* Function/Macro Call: None.
*/
.align 32
.globl CRYPT_AES_XTS_Encrypt
.type CRYPT_AES_XTS_Encrypt, @function
// AES-XTS encryption, x86-64 AES-NI/AVX.
// rdi=key schedule, rsi=in, rdx=out, ecx=len, r8=tweak (read and written
// back). Processes 6/5/4/3/2/1 blocks per pass, then finishes a partial
// block with ciphertext stealing. 96 stack bytes hold the 6 pre-whitened
// tweaks used by the 6-block pipeline.
CRYPT_AES_XTS_Encrypt:
.cfi_startproc
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
sub $96,%rsp
mov %rsp,%rbp // keep unaligned rsp so the epilogue can restore it
and $-16,%rsp // 16 bytes align
movl LEN, LTMP
movl LEN, TAILNUM
andl $-16,LTMP // LTMP = byte count rounded down to whole blocks
andl $0xf,TAILNUM // LEN % 16
movl 240(KEY), ROUNDS // round count lives at offset 240 of the key schedule
vmovdqa .Lgfp128(%rip),GFP
vmovdqu (TWEAK), TWEAK0
shl $4,ROUNDS // roundkey size: rounds*16, except for the last one
lea 16(KEY, ROUNDSQ),KEYEND // step to the end of roundkeys
// Dispatch on the number of whole blocks remaining.
.Lxts_aesenc_start:
cmpl $64, LTMP
jae .Lxts_enc_above_equal_4_blks
cmpl $32, LTMP
jae .Lxts_enc_above_equal_2_blks
cmpl $0, LTMP
je .Lxts_aesenc_finish
jmp .Lxts_enc_proc_1_blk
.Lxts_enc_above_equal_2_blks:
cmpl $48, LTMP
jb .Lxts_enc_proc_2_blks
jmp .Lxts_enc_proc_3_blks
.Lxts_enc_above_equal_4_blks:
cmpl $96, LTMP
jae .Lxts_enc_proc_6_blks_pre
cmpl $80, LTMP
jb .Lxts_enc_proc_4_blks
jmp .Lxts_enc_proc_5_blks
// Single-block path; the tail-stealing code re-enters at *_1blk_loaded.
.align 16
.Lxts_enc_proc_1_blk:
vmovdqu (IN),BLK0
.Lxts_enc_proc_1blk_loaded:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP // carry doublewords for NextTweak
vmovdqa TWEAK0,TWEAK5 // TWEAK5 = running tweak
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
vpxor RDK,BLK0,BLK0 // whiten with round key 0
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0 // pre-whiten with the tweak
AES_ENC_1_BLK KTMP ROUNDS RDK BLK0 // remaining rounds (see crypt_aes_macro_x86_64.s)
vpxor TWEAK0, BLK0, BLK0 // post-whiten with the same tweak
vmovdqu BLK0, (OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 16(IN),IN
subl $16,LTMP
lea 16(OUT),OUT // lea preserves the flags set by subl
je .Lxts_aesenc_finish
// Two-block path.
.align 16
.Lxts_enc_proc_2_blks:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
vpxor (IN), RDK, BLK0
vpxor 16(IN), RDK, BLK1
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
AES_ENC_2_BLKS KTMP ROUNDS RDK BLK0 BLK1
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 32(IN),IN
subl $32,LTMP
lea 32(OUT),OUT
je .Lxts_aesenc_finish
// Three-block path.
.align 16
.Lxts_enc_proc_3_blks:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
vpxor (IN), RDK, BLK0
vpxor 16(IN), RDK, BLK1
NextTweak GFP, TWEAK5, TWEAK2, TWKTMP, TMPX
vpxor 32(IN), RDK, BLK2
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
AES_ENC_3_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
vmovdqu BLK2, 32(OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 48(IN),IN
subl $48,LTMP
lea 48(OUT),OUT
je .Lxts_aesenc_finish
// Four-block path.
.align 16
.Lxts_enc_proc_4_blks:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
vpxor (IN), RDK, BLK0
vpxor 16(IN), RDK, BLK1
NextTweak GFP, TWEAK5, TWEAK2, TWKTMP, TMPX
vpxor 32(IN), RDK, BLK2
NextTweak GFP, TWEAK5, TWEAK3, TWKTMP, TMPX
vpxor 48(IN), RDK, BLK3
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vpxor TWEAK3, BLK3, BLK3
AES_ENC_4_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vpxor TWEAK3, BLK3, BLK3
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
vmovdqu BLK2, 32(OUT)
vmovdqu BLK3, 48(OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 64(IN),IN
subl $64,LTMP
lea 64(OUT),OUT
je .Lxts_aesenc_finish
// Five-block path.
.align 16
.Lxts_enc_proc_5_blks:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
vpxor (IN), RDK, BLK0
vpxor 16(IN), RDK, BLK1
NextTweak GFP, TWEAK5, TWEAK2, TWKTMP, TMPX
vpxor 32(IN), RDK, BLK2
NextTweak GFP, TWEAK5, TWEAK3, TWKTMP, TMPX
vpxor 48(IN), RDK, BLK3
NextTweak GFP, TWEAK5, TWEAK4, TWKTMP, TMPX
vpxor 64(IN), RDK, BLK4
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vpxor TWEAK3, BLK3, BLK3
vpxor TWEAK4, BLK4, BLK4
AES_ENC_5_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vpxor TWEAK3, BLK3, BLK3
vpxor TWEAK4, BLK4, BLK4
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
vmovdqu BLK2, 32(OUT)
vmovdqu BLK3, 48(OUT)
vmovdqu BLK4, 64(OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 80(IN),IN
subl $80,LTMP
lea 80(OUT),OUT
je .Lxts_aesenc_finish
// Six-block pipeline: tweaks are XORed with the LAST round key and spilled
// to the stack so aesenclast folds the post-whitening in for free.
.align 16
.Lxts_enc_proc_6_blks_pre:
vpshufd $0x5f,TWEAK0,TWKTMP // save higher doubleword of tweak
vmovdqa TWEAK0,TWEAK5 // copy first tweak
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
NextTweak GFP, TWEAK5, TWEAK2, TWKTMP, TMPX
NextTweak GFP, TWEAK5, TWEAK3, TWKTMP, TMPX
NextTweak GFP, TWEAK5, TWEAK4, TWKTMP, TMPX
NextTweakCore GFP, TWEAK5, TWKTMP, TMPX
.Lxts_enc_proc_6_blks:
vmovdqu (KEY), RDK
vmovdqu (IN),BLK0
vpxor TWEAK0,BLK0,BLK0 // blk0 ^= tweak0
vpxor RDK,BLK0,BLK0 // blk0 = blk0 ^ tweak0 ^ rk0, prepared for the loop round
vmovdqu -16(KEYEND),RDK1 // load last round key
vmovdqu 16(IN),BLK1
vpxor RDK1,TWEAK0,TWEAK0
aesenc 16(KEY),BLK0 // first round: rk1
vmovdqa TWEAK0,(%rsp) // spill pre-whitened tweak for aesenclast
vpxor TWEAK1,BLK1,BLK1
vpxor RDK,BLK1,BLK1
vmovdqu 32(IN),BLK2
vpxor RDK1,TWEAK1,TWEAK1
aesenc 16(KEY),BLK1
vmovdqa TWEAK1,16(%rsp)
vpxor TWEAK2,BLK2,BLK2
vpxor RDK,BLK2,BLK2
vmovdqu 48(IN),BLK3
vpxor RDK1,TWEAK2,TWEAK2
aesenc 16(KEY),BLK2
vmovdqa TWEAK2,32(%rsp)
vpxor TWEAK3,BLK3,BLK3
vpxor RDK,BLK3,BLK3
vmovdqu 64(IN),BLK4
vpxor RDK1,TWEAK3,TWEAK3
aesenc 16(KEY),BLK3
vmovdqa TWEAK3,48(%rsp)
vpxor TWEAK4,BLK4,BLK4
vpxor RDK,BLK4,BLK4
vmovdqu 80(IN),BLK5
vpxor RDK1,TWEAK4,TWEAK4
aesenc 16(KEY),BLK4
vmovdqa TWEAK4,64(%rsp)
vpxor TWEAK5,BLK5,BLK5
vpxor RDK,BLK5,BLK5
vpxor RDK1,TWEAK5,TWEAK5
aesenc 16(KEY),BLK5
vmovdqa TWEAK5,80(%rsp)
mov $(7*16),TROUNDS // loop 7 rounds
sub ROUNDSQ,TROUNDS
.align 16
.Lxts_6_blks_loop:
vmovdqu -96(KEYEND,TROUNDS),RDK // left 5+1 block to interval
aesenc RDK, BLK0
aesenc RDK, BLK1
aesenc RDK, BLK2
add $16,TROUNDS
aesenc RDK, BLK3
aesenc RDK, BLK4
aesenc RDK, BLK5
jnz .Lxts_6_blks_loop
// Remaining 5 middle rounds, interleaved with next-iteration tweak
// pre-computation (TWEAK0..TWEAK4 derived from TWEAK5 on the fly).
vpxor 80(%rsp),RDK1,TWEAK5 // tweak5 = tweak5^lastroundkey^lastroundkey
vmovdqu -96(KEYEND,TROUNDS),RDK
vpshufd $0x5f,TWEAK5,TWKTMP // use new tweak-tmp
vmovdqa TWKTMP,TMPX // pre-calculate next round tweak0~tweak5
aesenc RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesenc RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesenc RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
add $16,TROUNDS
aesenc RDK, BLK3
vmovdqa TWEAK5,TWEAK0
aesenc RDK, BLK4
aesenc RDK, BLK5
vmovdqu -96(KEYEND,TROUNDS),RDK
vmovdqa TWKTMP,TMPX
aesenc RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesenc RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesenc RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
add $16,TROUNDS
aesenc RDK, BLK3
vmovdqa TWEAK5,TWEAK1
aesenc RDK, BLK4
aesenc RDK, BLK5
vmovdqu -96(KEYEND,TROUNDS),RDK
vmovdqa TWKTMP,TMPX
aesenc RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesenc RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesenc RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
add $16,TROUNDS
aesenc RDK, BLK3
vmovdqa TWEAK5,TWEAK2
aesenc RDK, BLK4
aesenc RDK, BLK5
vmovdqu -96(KEYEND,TROUNDS),RDK
vmovdqa TWKTMP,TMPX
aesenc RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesenc RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesenc RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
add $16,TROUNDS
aesenc RDK, BLK3
vmovdqa TWEAK5,TWEAK3
aesenc RDK, BLK4
aesenc RDK, BLK5
vmovdqu -96(KEYEND,TROUNDS),RDK
vmovdqa TWKTMP,TMPX
aesenc RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesenc RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesenc RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
aesenc RDK, BLK3
vmovdqa TWEAK5,TWEAK4
aesenc RDK, BLK4
aesenc RDK, BLK5
vmovdqa TWKTMP,TMPX
aesenclast (%rsp), BLK0
aesenclast 16(%rsp), BLK1 // already do the tweak^lastround, so here just aesenclast
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesenclast 32(%rsp), BLK2
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesenclast 48(%rsp), BLK3
vpxor TMPX,TWEAK5,TWEAK5
aesenclast 64(%rsp), BLK4
aesenclast 80(%rsp), BLK5
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
vmovdqu BLK2, 32(OUT)
vmovdqu BLK3, 48(OUT)
vmovdqu BLK4, 64(OUT)
vmovdqu BLK5, 80(OUT)
leaq 96(IN), IN
leaq 96(OUT), OUT
sub $96, LTMP
cmp $96, LTMP
jb .Lxts_aesenc_start
jmp .Lxts_enc_proc_6_blks
// Ciphertext stealing: swap tail bytes with the end of the previous
// ciphertext block, then re-encrypt that rebuilt block in place.
.align 16
.Lxts_aesenc_finish:
cmp $0,TAILNUM
je .Lxts_ret
.Lxts_tail_proc:
mov OUT,TMPOUT
mov IN,TMPIN
.Lxts_tail_loop:
sub $1,TAILNUM
movzb -16(TMPOUT),%r10d // byte of previous ciphertext block
movzb (TMPIN),%r11d // tail plaintext byte
mov %r10b,(TMPOUT) // emit stolen ciphertext byte
lea 1(TMPIN),TMPIN
mov %r11b,-16(TMPOUT) // build the block to be re-encrypted
lea 1(TMPOUT),TMPOUT // lea preserves the flags set by sub
ja .Lxts_tail_loop
sub $16,OUT // step 1 block back to save the last stealing block encryption
add $16,LTMP
vmovdqu (OUT),BLK0
jmp .Lxts_enc_proc_1blk_loaded
// Write back the updated tweak, zeroize xmm state, return 0.
.Lxts_ret:
vmovdqu TWEAK0, (TWEAK)
vpxor BLK0, BLK0, BLK0
vpxor BLK1, BLK1, BLK1
vpxor BLK2, BLK2, BLK2
vpxor BLK3, BLK3, BLK3
vpxor BLK4, BLK4, BLK4
vpxor BLK5, BLK5, BLK5
vpxor BLK6, BLK6, BLK6
vpxor RDK, RDK, RDK
movl $0, RET // return 0 (success)
mov %rbp,%rsp
add $96,%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
ret
.cfi_endproc
.size CRYPT_AES_XTS_Encrypt, .-CRYPT_AES_XTS_Encrypt
/**
 * Function description: AES-XTS decryption, x86-64 AES-NI/AVX assembly implementation.
 * Function prototype: int32_t CRYPT_AES_XTS_Decrypt(const CRYPT_AES_Key *ctx,
 *                                                   const uint8_t *in, uint8_t *out, uint32_t len);
 * Input registers (SysV AMD64 convention — the original comment listed AArch64
 * x0..x3 names, which do not apply to this x86-64 file):
 *     %rdi: pointer to the input key structure.
 *     %rsi: pointer to the input (cipher) data.
 *     %rdx: pointer to the output (plain) data.
 *     %ecx: length of the data in bytes.
 * NOTE(review): the body also loads and stores a 16-byte tweak through the
 * TWEAK register (presumably a fifth pointer argument); the 4-argument
 * prototype above may be out of date — confirm against the public header.
 * Changed registers: xmm1,xmm3,xmm4,xmm5,xmm6,xmm10,xmm11,xmm12,xmm13.
 * Output register: eax.
 * Function/Macro Call: NextTweak, NextTweakCore, AES_DEC_*_BLKS macros.
 */
.align 32
.globl CRYPT_AES_XTS_Decrypt
.type CRYPT_AES_XTS_Decrypt, @function
CRYPT_AES_XTS_Decrypt:
.cfi_startproc
// Prologue: save callee-saved registers and carve out a 96-byte, 16-byte
// aligned spill area (later holds tweak^lastRoundKey for 6 blocks).
pushq %rbx
pushq %rbp
pushq %r12
pushq %r13
pushq %r14
pushq %r15
sub $96,%rsp
mov %rsp,%rbp
and $-16,%rsp // 16 bytes align
// LTMP = len rounded down to a multiple of 16; TAILNUM = len % 16.
// If a partial tail exists, the cmovg below holds back one full block
// (LTMP -= 16) for ciphertext stealing.
movl LEN, LTMP
movl LEN, TAILNUM
andl $-16,LTMP
movl LTMP,WTMP2
sub $16,WTMP2 // preserve last and tail block
andl $0xf,TAILNUM // LEN % 16
cmovg WTMP2,LTMP
movl 240(KEY), ROUNDS
vmovdqa .Lgfp128(%rip),GFP
vmovdqu (TWEAK), TWEAK0
shl $4,ROUNDS // roundkey size: rounds*16, except for the last one
lea 16(KEY, ROUNDSQ),KEYEND // step to the end of roundkeys
// Dispatcher: choose the widest 1..6-block path that fits the remaining length.
.Lxts_aesdec_start:
cmpl $64, LTMP
jae .Lxts_dec_above_equal_4_blks
cmpl $32, LTMP
jae .Lxts_dec_above_equal_2_blks
cmpl $0, LTMP
je .Lxts_dec_last_2blks
jmp .Lxts_dec_proc_1_blk
.Lxts_dec_above_equal_2_blks:
cmpl $48, LTMP
jb .Lxts_dec_proc_2_blks
jmp .Lxts_dec_proc_3_blks
.Lxts_dec_above_equal_4_blks:
cmpl $96, LTMP
jae .Lxts_dec_proc_6_blks_pre
cmpl $80, LTMP
jb .Lxts_dec_proc_4_blks
jmp .Lxts_dec_proc_5_blks
.align 16
// Ciphertext stealing: swap the TAILNUM trailing input bytes with the head of
// the block just written, then jump back to re-decrypt that block with the
// saved tweak (restored from TWEAK1 below).
.Lxts_dec_tail_proc:
cmp $0,TAILNUM
je .Lxts_aesdec_finish
vmovdqa TWEAK1,TWEAK0 // restore back tweak0
mov OUT,TMPOUT
mov IN,TMPIN
.Lxts_dec_tail_loop:
sub $1,TAILNUM
movzb -16(TMPOUT),%r10d
movzb (TMPIN),%r11d
mov %r10b,(TMPOUT)
lea 1(TMPIN),TMPIN
mov %r11b,-16(TMPOUT)
lea 1(TMPOUT),TMPOUT
ja .Lxts_dec_tail_loop
sub $16,OUT // step 1 block back to save the last stealing block encryption
add $16,LTMP
vmovdqu (OUT),BLK0
jmp .Lxts_dec_proc_1blk_loaded
.align 16
// All full blocks are done but a tail remains: XTS-CTS decryption consumes the
// tweaks out of order — keep the current tweak in TWEAK1 for the stolen block
// and advance TWEAK0 for the last full block.
.Lxts_dec_last_2blks:
cmp $0,TAILNUM
je .Lxts_aesdec_finish
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK1 // tail block use tweak0, last block use tweak1
NextTweakCore GFP, TWEAK0, TWKTMP, TMPX
// Single-block path: out = AES_Dec(in ^ tweak0) ^ tweak0, then advance tweak0.
.Lxts_dec_proc_1_blk:
vmovdqu (IN),BLK0
.Lxts_dec_proc_1blk_loaded:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
vpxor RDK,BLK0,BLK0
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
AES_DEC_1_BLK KTMP ROUNDS RDK BLK0
vpxor TWEAK0, BLK0, BLK0
vmovdqu BLK0, (OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 16(IN),IN
subl $16,LTMP
lea 16(OUT),OUT
jl .Lxts_dec_tail_proc
jmp .Lxts_aesdec_start
.align 16
// Two-block path with consecutive tweaks tweak0/tweak1.
.Lxts_dec_proc_2_blks:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
vpxor (IN), RDK, BLK0
vpxor 16(IN), RDK, BLK1
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
AES_DEC_2_BLKS KTMP ROUNDS RDK BLK0 BLK1
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 32(IN),IN
subl $32,LTMP
lea 32(OUT),OUT
jge .Lxts_aesdec_start
.align 16
// Three-block path with consecutive tweaks tweak0..tweak2.
.Lxts_dec_proc_3_blks:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
vpxor (IN), RDK, BLK0
vpxor 16(IN), RDK, BLK1
NextTweak GFP, TWEAK5, TWEAK2, TWKTMP, TMPX
vpxor 32(IN), RDK, BLK2
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
AES_DEC_3_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
vmovdqu BLK2, 32(OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 48(IN),IN
subl $48,LTMP
lea 48(OUT),OUT
jge .Lxts_aesdec_start
.align 16
// Four-block path with consecutive tweaks tweak0..tweak3.
.Lxts_dec_proc_4_blks:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
vpxor (IN), RDK, BLK0
vpxor 16(IN), RDK, BLK1
NextTweak GFP, TWEAK5, TWEAK2, TWKTMP, TMPX
vpxor 32(IN), RDK, BLK2
NextTweak GFP, TWEAK5, TWEAK3, TWKTMP, TMPX
vpxor 48(IN), RDK, BLK3
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vpxor TWEAK3, BLK3, BLK3
AES_DEC_4_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vpxor TWEAK3, BLK3, BLK3
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
vmovdqu BLK2, 32(OUT)
vmovdqu BLK3, 48(OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 64(IN),IN
subl $64,LTMP
lea 64(OUT),OUT
jge .Lxts_aesdec_start
.align 16
// Five-block path with consecutive tweaks tweak0..tweak4.
.Lxts_dec_proc_5_blks:
mov KEY,KTMP
vpshufd $0x5f,TWEAK0,TWKTMP
vmovdqa TWEAK0,TWEAK5
movl 240(KTMP), ROUNDS
vmovdqu (KTMP), RDK
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
vpxor (IN), RDK, BLK0
vpxor 16(IN), RDK, BLK1
NextTweak GFP, TWEAK5, TWEAK2, TWKTMP, TMPX
vpxor 32(IN), RDK, BLK2
NextTweak GFP, TWEAK5, TWEAK3, TWKTMP, TMPX
vpxor 48(IN), RDK, BLK3
NextTweak GFP, TWEAK5, TWEAK4, TWKTMP, TMPX
vpxor 64(IN), RDK, BLK4
decl ROUNDS
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vpxor TWEAK3, BLK3, BLK3
vpxor TWEAK4, BLK4, BLK4
AES_DEC_5_BLKS KTMP ROUNDS RDK BLK0 BLK1 BLK2 BLK3 BLK4
vpxor TWEAK0, BLK0, BLK0
vpxor TWEAK1, BLK1, BLK1
vpxor TWEAK2, BLK2, BLK2
vpxor TWEAK3, BLK3, BLK3
vpxor TWEAK4, BLK4, BLK4
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
vmovdqu BLK2, 32(OUT)
vmovdqu BLK3, 48(OUT)
vmovdqu BLK4, 64(OUT)
NextTweak GFP, TWEAK5, TWEAK0, TWKTMP, TMPX
lea 80(IN),IN
subl $80,LTMP
lea 80(OUT),OUT
jge .Lxts_aesdec_start
.align 32
// Precompute tweak1..tweak5 for the 6-block pipeline (tweak0 is already live).
.Lxts_dec_proc_6_blks_pre:
vpshufd $0x5f,TWEAK0,TWKTMP // save higher doubleword of tweak
vmovdqa TWEAK0,TWEAK5 // copy first tweak
NextTweak GFP, TWEAK5, TWEAK1, TWKTMP, TMPX
NextTweak GFP, TWEAK5, TWEAK2, TWKTMP, TMPX
NextTweak GFP, TWEAK5, TWEAK3, TWKTMP, TMPX
NextTweak GFP, TWEAK5, TWEAK4, TWKTMP, TMPX
NextTweakCore GFP, TWEAK5, TWKTMP, TMPX
.align 32
// Main pipeline: 6 blocks in flight. Each tweak^lastRoundKey is parked on the
// stack so the final aesdeclast simultaneously applies the tweak mask.
.Lxts_dec_proc_6_blks:
vmovdqu (KEY), RDK
vmovdqu (IN),BLK0
vpxor TWEAK0,BLK0,BLK0 // blk0 ^= tweak0
vpxor RDK,BLK0,BLK0 // blk0 = blk0 ^ tweak0 ^ rk0, prepared for the loop round
vmovdqu -16(KEYEND),RDK1 // load last round key
vmovdqu 16(IN),BLK1
vpxor RDK1,TWEAK0,TWEAK0
aesdec 16(KEY),BLK0 // first round: rk1
vmovdqa TWEAK0,(%rsp)
vpxor TWEAK1,BLK1,BLK1
vpxor RDK,BLK1,BLK1
vmovdqu 32(IN),BLK2
vpxor RDK1,TWEAK1,TWEAK1
aesdec 16(KEY),BLK1
vmovdqa TWEAK1,16(%rsp)
vpxor TWEAK2,BLK2,BLK2
vpxor RDK,BLK2,BLK2
vmovdqu 48(IN),BLK3
vpxor RDK1,TWEAK2,TWEAK2
aesdec 16(KEY),BLK2
vmovdqa TWEAK2,32(%rsp)
vpxor TWEAK3,BLK3,BLK3
vpxor RDK,BLK3,BLK3
vmovdqu 64(IN),BLK4
vpxor RDK1,TWEAK3,TWEAK3
aesdec 16(KEY),BLK3
vmovdqa TWEAK3,48(%rsp)
vpxor TWEAK4,BLK4,BLK4
vpxor RDK,BLK4,BLK4
vmovdqu 80(IN),BLK5
vpxor RDK1,TWEAK4,TWEAK4
aesdec 16(KEY),BLK4
vmovdqa TWEAK4,64(%rsp)
vpxor TWEAK5,BLK5,BLK5
vpxor RDK,BLK5,BLK5
vpxor RDK1,TWEAK5,TWEAK5
aesdec 16(KEY),BLK5
vmovdqa TWEAK5,80(%rsp)
// TROUNDS starts negative and counts up by 16 per round; the add sets ZF so
// jnz exits when the interleaved tail rounds below must take over.
mov $(7*16),TROUNDS // loop 7 rounds
sub ROUNDSQ,TROUNDS
.align 32
.Lxts_dec_6blks_loop:
vmovdqu -96(KEYEND,TROUNDS),RDK // left 5+1 block to interval
aesdec RDK, BLK0
aesdec RDK, BLK1
aesdec RDK, BLK2
add $16,TROUNDS
aesdec RDK, BLK3
aesdec RDK, BLK4
aesdec RDK, BLK5
jnz .Lxts_dec_6blks_loop
// Last five normal rounds, unrolled and interleaved with the computation of
// the next iteration's tweak0..tweak4 (derived step-by-step from tweak5).
vpxor 80(%rsp),RDK1,TWEAK5 // tweak5 = tweak5^lastroundkey^lastroundkey
vmovdqu -96(KEYEND,TROUNDS),RDK
vpshufd $0x5f,TWEAK5,TWKTMP // use new tweak-tmp
vmovdqa TWKTMP,TMPX // pre-calculate next round tweak0~tweak5
aesdec RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesdec RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesdec RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
add $16,TROUNDS
aesdec RDK, BLK3
vmovdqa TWEAK5,TWEAK0
aesdec RDK, BLK4
aesdec RDK, BLK5
vmovdqu -96(KEYEND,TROUNDS),RDK
vmovdqa TWKTMP,TMPX
aesdec RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesdec RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesdec RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
add $16,TROUNDS
aesdec RDK, BLK3
vmovdqa TWEAK5,TWEAK1
aesdec RDK, BLK4
aesdec RDK, BLK5
vmovdqu -96(KEYEND,TROUNDS),RDK
vmovdqa TWKTMP,TMPX
aesdec RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesdec RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesdec RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
add $16,TROUNDS
aesdec RDK, BLK3
vmovdqa TWEAK5,TWEAK2
aesdec RDK, BLK4
aesdec RDK, BLK5
vmovdqu -96(KEYEND,TROUNDS),RDK
vmovdqa TWKTMP,TMPX
aesdec RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesdec RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesdec RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
add $16,TROUNDS
aesdec RDK, BLK3
vmovdqa TWEAK5,TWEAK3
aesdec RDK, BLK4
aesdec RDK, BLK5
vmovdqu -96(KEYEND,TROUNDS),RDK
vmovdqa TWKTMP,TMPX
aesdec RDK, BLK0
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesdec RDK, BLK1
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesdec RDK, BLK2
vpxor TMPX,TWEAK5,TWEAK5
aesdec RDK, BLK3
vmovdqa TWEAK5,TWEAK4
aesdec RDK, BLK4
aesdec RDK, BLK5
vmovdqa TWKTMP,TMPX
aesdeclast (%rsp), BLK0
aesdeclast 16(%rsp), BLK1 // already do the tweak^lastround, so here just aesdeclast
vpaddd TWKTMP,TWKTMP,TWKTMP
vpsrad $31,TMPX,TMPX
aesdeclast 32(%rsp), BLK2
vpaddq TWEAK5,TWEAK5,TWEAK5
vpand GFP,TMPX,TMPX
aesdeclast 48(%rsp), BLK3
vpxor TMPX,TWEAK5,TWEAK5
aesdeclast 64(%rsp), BLK4
aesdeclast 80(%rsp), BLK5
vmovdqu BLK0, (OUT)
vmovdqu BLK1, 16(OUT)
vmovdqu BLK2, 32(OUT)
vmovdqu BLK3, 48(OUT)
vmovdqu BLK4, 64(OUT)
vmovdqu BLK5, 80(OUT)
leaq 96(IN), IN
leaq 96(OUT), OUT
sub $96, LTMP
cmp $96, LTMP
jb .Lxts_aesdec_start
jmp .Lxts_dec_proc_6_blks
.align 16
// Epilogue: write back the next tweak, scrub xmm registers that held key or
// data material, set the return value to 0 and restore the caller's stack.
.Lxts_aesdec_finish:
vmovdqu TWEAK0, (TWEAK)
vpxor BLK0, BLK0, BLK0
vpxor BLK1, BLK1, BLK1
vpxor BLK2, BLK2, BLK2
vpxor BLK3, BLK3, BLK3
vpxor BLK4, BLK4, BLK4
vpxor BLK5, BLK5, BLK5
vpxor BLK6, BLK6, BLK6
vpxor RDK, RDK, RDK
movl $0, RET
mov %rbp,%rsp
add $96,%rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
ret
.cfi_endproc
.size CRYPT_AES_XTS_Decrypt, .-CRYPT_AES_XTS_Decrypt
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/asm/crypt_aes_xts_x86_64.S | Unix Assembly | unknown | 25,529 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
#include "securec.h"
#include "bsl_err_internal.h"
#include "crypt_utils.h"
#include "crypt_errno.h"
#include "bsl_sal.h"
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
#include "crypt_aes_tbox.h"
#else
#include "crypt_aes_sbox.h"
#endif
#include "crypt_aes.h"
/*
 * Prepare ctx for AES-128 encryption. ctx->rounds must be set before the
 * expansion call: SetAesKeyExpansionSbox reads it as the loop bound.
 */
void SetEncryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    ctx->rounds = 10; // 10 rounds
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
    SetAesKeyExpansionTbox(ctx, CRYPT_AES_128, key, true);
#else
    SetAesKeyExpansionSbox(ctx, CRYPT_AES_128, key);
#endif
}
/* Prepare ctx for AES-192 encryption (rounds set before expansion, see AES-128 variant). */
void SetEncryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    ctx->rounds = 12; // 12 rounds
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
    SetAesKeyExpansionTbox(ctx, CRYPT_AES_192, key, true);
#else
    SetAesKeyExpansionSbox(ctx, CRYPT_AES_192, key);
#endif
}
/* Prepare ctx for AES-256 encryption (rounds set before expansion, see AES-128 variant). */
void SetEncryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    ctx->rounds = 14; // 14 rounds
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
    SetAesKeyExpansionTbox(ctx, CRYPT_AES_256, key, true);
#else
    SetAesKeyExpansionSbox(ctx, CRYPT_AES_256, key);
#endif
}
/*
 * Prepare ctx for AES-128 decryption. The T-box path passes false to request
 * the decryption schedule; the S-box path uses the same expansion as encryption.
 */
void SetDecryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    ctx->rounds = 10; // 10 rounds
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
    SetAesKeyExpansionTbox(ctx, CRYPT_AES_128, key, false);
#else
    SetAesKeyExpansionSbox(ctx, CRYPT_AES_128, key);
#endif
}
/* Prepare ctx for AES-192 decryption (see AES-128 decrypt variant for the false flag). */
void SetDecryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    ctx->rounds = 12; // 12 rounds
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
    SetAesKeyExpansionTbox(ctx, CRYPT_AES_192, key, false);
#else
    SetAesKeyExpansionSbox(ctx, CRYPT_AES_192, key);
#endif
}
/* Prepare ctx for AES-256 decryption (see AES-128 decrypt variant for the false flag). */
void SetDecryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    ctx->rounds = 14; // 14 rounds
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
    SetAesKeyExpansionTbox(ctx, CRYPT_AES_256, key, false);
#else
    SetAesKeyExpansionSbox(ctx, CRYPT_AES_256, key);
#endif
}
/*
 * Encrypt one 16-byte block, dispatching to the implementation selected at
 * build time. Always returns CRYPT_SUCCESS. No NULL checks are performed here;
 * callers must pass valid pointers. The S-box path ignores len and processes
 * exactly one block (see CRYPT_AES_EncryptSbox).
 */
int32_t CRYPT_AES_Encrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len)
{
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
    CRYPT_AES_EncryptTbox(ctx, in, out, len);
#else
    CRYPT_AES_EncryptSbox(ctx, in, out, len);
#endif
    return CRYPT_SUCCESS;
}
/*
 * Decrypt one 16-byte block, dispatching to the implementation selected at
 * build time. Always returns CRYPT_SUCCESS; same pointer/len contract as
 * CRYPT_AES_Encrypt.
 */
int32_t CRYPT_AES_Decrypt(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len)
{
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
    CRYPT_AES_DecryptTbox(ctx, in, out, len);
#else
    CRYPT_AES_DecryptSbox(ctx, in, out, len);
#endif
    return CRYPT_SUCCESS;
}
#endif /* HITLS_CRYPTO_AES */
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/crypt_aes.c | C | unknown | 3,030 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CRYPT_AES_LOCAL_H
#define CRYPT_AES_LOCAL_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
#include "crypt_aes.h"
/* Key-schedule helpers implemented in crypt_aes.c; each sets ctx->rounds and
 * then expands the raw key into ctx->key for the corresponding key size. */
void SetEncryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key);
void SetEncryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key);
void SetEncryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key);
void SetDecryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key);
void SetDecryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key);
void SetDecryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key);
#endif // HITLS_CRYPTO_AES
#endif // CRYPT_AES_LOCAL_H
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/crypt_aes_local.h | C | unknown | 1,111 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && !defined(HITLS_CRYPTO_AES_PRECALC_TABLES)
#include "securec.h"
#include "bsl_err_internal.h"
#include "crypt_utils.h"
#include "crypt_errno.h"
#include "bsl_sal.h"
#include "crypt_aes.h"
#include "crypt_aes_sbox.h"
#define BYTE_BITS 8 /* bits per byte, used for key-size/word arithmetic */
/* FIPS-197 forward S-box (SubBytes lookup table), indexed by byte value. */
static const uint8_t AES_S[256] = {
    0x63U, 0x7cU, 0x77U, 0x7bU, 0xf2U, 0x6bU, 0x6fU, 0xc5U, 0x30U, 0x01U, 0x67U, 0x2bU, 0xfeU, 0xd7U, 0xabU, 0x76U,
    0xcaU, 0x82U, 0xc9U, 0x7dU, 0xfaU, 0x59U, 0x47U, 0xf0U, 0xadU, 0xd4U, 0xa2U, 0xafU, 0x9cU, 0xa4U, 0x72U, 0xc0U,
    0xb7U, 0xfdU, 0x93U, 0x26U, 0x36U, 0x3fU, 0xf7U, 0xccU, 0x34U, 0xa5U, 0xe5U, 0xf1U, 0x71U, 0xd8U, 0x31U, 0x15U,
    0x04U, 0xc7U, 0x23U, 0xc3U, 0x18U, 0x96U, 0x05U, 0x9aU, 0x07U, 0x12U, 0x80U, 0xe2U, 0xebU, 0x27U, 0xb2U, 0x75U,
    0x09U, 0x83U, 0x2cU, 0x1aU, 0x1bU, 0x6eU, 0x5aU, 0xa0U, 0x52U, 0x3bU, 0xd6U, 0xb3U, 0x29U, 0xe3U, 0x2fU, 0x84U,
    0x53U, 0xd1U, 0x00U, 0xedU, 0x20U, 0xfcU, 0xb1U, 0x5bU, 0x6aU, 0xcbU, 0xbeU, 0x39U, 0x4aU, 0x4cU, 0x58U, 0xcfU,
    0xd0U, 0xefU, 0xaaU, 0xfbU, 0x43U, 0x4dU, 0x33U, 0x85U, 0x45U, 0xf9U, 0x02U, 0x7fU, 0x50U, 0x3cU, 0x9fU, 0xa8U,
    0x51U, 0xa3U, 0x40U, 0x8fU, 0x92U, 0x9dU, 0x38U, 0xf5U, 0xbcU, 0xb6U, 0xdaU, 0x21U, 0x10U, 0xffU, 0xf3U, 0xd2U,
    0xcdU, 0x0cU, 0x13U, 0xecU, 0x5fU, 0x97U, 0x44U, 0x17U, 0xc4U, 0xa7U, 0x7eU, 0x3dU, 0x64U, 0x5dU, 0x19U, 0x73U,
    0x60U, 0x81U, 0x4fU, 0xdcU, 0x22U, 0x2aU, 0x90U, 0x88U, 0x46U, 0xeeU, 0xb8U, 0x14U, 0xdeU, 0x5eU, 0x0bU, 0xdbU,
    0xe0U, 0x32U, 0x3aU, 0x0aU, 0x49U, 0x06U, 0x24U, 0x5cU, 0xc2U, 0xd3U, 0xacU, 0x62U, 0x91U, 0x95U, 0xe4U, 0x79U,
    0xe7U, 0xc8U, 0x37U, 0x6dU, 0x8dU, 0xd5U, 0x4eU, 0xa9U, 0x6cU, 0x56U, 0xf4U, 0xeaU, 0x65U, 0x7aU, 0xaeU, 0x08U,
    0xbaU, 0x78U, 0x25U, 0x2eU, 0x1cU, 0xa6U, 0xb4U, 0xc6U, 0xe8U, 0xddU, 0x74U, 0x1fU, 0x4bU, 0xbdU, 0x8bU, 0x8aU,
    0x70U, 0x3eU, 0xb5U, 0x66U, 0x48U, 0x03U, 0xf6U, 0x0eU, 0x61U, 0x35U, 0x57U, 0xb9U, 0x86U, 0xc1U, 0x1dU, 0x9eU,
    0xe1U, 0xf8U, 0x98U, 0x11U, 0x69U, 0xd9U, 0x8eU, 0x94U, 0x9bU, 0x1eU, 0x87U, 0xe9U, 0xceU, 0x55U, 0x28U, 0xdfU,
    0x8cU, 0xa1U, 0x89U, 0x0dU, 0xbfU, 0xe6U, 0x42U, 0x68U, 0x41U, 0x99U, 0x2dU, 0x0fU, 0xb0U, 0x54U, 0xbbU, 0x16U
};
/* Apply the forward S-box to each byte of a 32-bit word (SubWord). */
#define SEARCH_SBOX(t) \
    ((AES_S[((t) >> 24)] << 24) | (AES_S[((t) >> 16) & 0xFF] << 16) | (AES_S[((t) >> 8) & 0xFF] << 8) | \
    (AES_S[((t) >> 0) & 0xFF] << 0))
/* Apply the inverse S-box to each byte of a 32-bit word. */
#define SEARCH_INVSBOX(t) \
    ((InvSubSbox(((t) >> 24)) << 24) | (InvSubSbox(((t) >> 16) & 0xFF) << 16) | (InvSubSbox(((t) >> 8) & 0xFF) << 8) | \
    (InvSubSbox(((t) >> 0) & 0xFF) << 0))
/*
 * Expand the raw key into the round-key schedule in ctx->key (FIPS-197 5.2).
 * ctx->rounds must already hold the round count; the schedule then contains
 * 4 * (rounds + 1) 32-bit words.
 */
void SetAesKeyExpansionSbox(CRYPT_AES_Key *ctx, uint32_t keyLenBits, const uint8_t *key)
{
    uint32_t *rk = ctx->key;
    const uint32_t nk = keyLenBits / (sizeof(uint32_t) * BYTE_BITS); /* key length in 32-bit words */
    const uint32_t total = 4 * (ctx->rounds + 1);                    /* schedule length in words */
    uint32_t i;
    /* The first nk words are simply the big-endian raw key. */
    for (i = 0; i < nk; ++i) {
        rk[i] = GET_UINT32_BE(key, i * sizeof(uint32_t));
    }
    /* Derive every remaining word from the word nk positions back. */
    for (; i < total; ++i) {
        uint32_t prev = rk[i - 1];
        if ((i % nk) == 0) {
            /* Start of a key-length group: RotWord + SubWord + round constant. */
            prev = SEARCH_SBOX(ROTL32(prev, BYTE_BITS)) ^ RoundConstArray(i / nk - 1);
        } else if (nk > 6 && (i % nk) == 4) {
            /* AES-256 only: extra SubWord in the middle of each group. */
            prev = SEARCH_SBOX(prev);
        }
        rk[i] = rk[i - nk] ^ prev;
    }
}
/* XOR the four state words with round-key number nr from the schedule. */
static void AesAddRoundKey(uint32_t *state, const uint32_t *round, int nr)
{
    const uint32_t *rk = &round[4 * nr];
    state[0] ^= rk[0];
    state[1] ^= rk[1];
    state[2] ^= rk[2];
    state[3] ^= rk[3];
}
/* SubBytes: run every byte of the state through the forward S-box. */
static void AesSubBytes(uint32_t *state)
{
    state[0] = SEARCH_SBOX(state[0]);
    state[1] = SEARCH_SBOX(state[1]);
    state[2] = SEARCH_SBOX(state[2]);
    state[3] = SEARCH_SBOX(state[3]);
}
/*
 * ShiftRows on a column-per-word state: each output word keeps its own top
 * byte and pulls the lower bytes from the following columns (wrapping).
 */
static void AesShiftRows(uint32_t *state)
{
    uint32_t shifted[4];
    for (int32_t c = 0; c < 4; ++c) {
        shifted[c] = (state[c] & 0xFF000000) | (state[(c + 1) & 3] & 0x00FF0000) |
                     (state[(c + 2) & 3] & 0x0000FF00) | (state[(c + 3) & 3] & 0x000000FF);
    }
    for (int32_t c = 0; c < 4; ++c) {
        state[c] = shifted[c];
    }
}
/* Multiply by x (i.e. 2) in GF(2^8) with the AES polynomial 0x11b, branch-free. */
static uint8_t AesXtime(uint8_t x)
{
    uint8_t mask = (uint8_t)(0u - ((x >> 7) & 1u)); /* 0x00 or 0xFF, no data-dependent branch */
    return (uint8_t)((x << 1) ^ (mask & 0x1b));
}
/* Multiply x by 2^ts in GF(2^8) via repeated xtime (ts <= 0 returns x unchanged). */
static uint8_t AesXtimes(uint8_t x, int ts)
{
    uint8_t v = x;
    for (int i = 0; i < ts; ++i) {
        v = AesXtime(v);
    }
    return v;
}
/* GF(2^8) multiplication: accumulate x * 2^bit for every set bit of y. */
static uint8_t AesMul(uint8_t x, uint8_t y)
{
    uint8_t acc = 0;
    for (int bit = 0; bit < 8; ++bit) {
        acc ^= (uint8_t)(((y >> bit) & 1) * AesXtimes(x, bit));
    }
    return acc;
}
/*
 * MixColumns (isMixColumns == true) or InvMixColumns (false): multiply each
 * state column by the fixed FIPS-197 coefficient matrix in GF(2^8).
 */
static void AesMixColumns(uint32_t *state, bool isMixColumns)
{
    /* Row-major coefficient matrices for the forward and inverse transforms. */
    static const uint8_t fwdCoef[16] = {2, 3, 1, 1, 1, 2, 3, 1, 1, 1, 2, 3, 3, 1, 1, 2};
    static const uint8_t invCoef[16] = {0x0e, 0x0b, 0x0d, 0x09, 0x09, 0x0e, 0x0b, 0x0d,
                                        0x0d, 0x09, 0x0e, 0x0b, 0x0b, 0x0d, 0x09, 0x0e};
    const uint8_t *coef = isMixColumns ? fwdCoef : invCoef;
    uint8_t bytes[16];
    for (int32_t w = 0; w < 4; ++w) {
        PUT_UINT32_BE(state[w], bytes, 4 * w);
    }
    for (int col = 0; col < 4; ++col) {
        uint8_t mixed[4];
        for (int row = 0; row < 4; ++row) {
            uint8_t acc = 0;
            for (int k = 0; k < 4; ++k) {
                acc ^= AesMul(bytes[col * 4 + k], coef[row * 4 + k]);
            }
            mixed[row] = acc;
        }
        for (int row = 0; row < 4; ++row) {
            bytes[col * 4 + row] = mixed[row];
        }
    }
    for (int32_t w = 0; w < 4; ++w) {
        state[w] = GET_UINT32_BE(bytes, 4 * w);
    }
}
// addRound + 9/11/13 * (sub + shiftRow + mix + addRound) + (sub + shiftRow + addRound)
/*
 * Encrypt exactly one 16-byte block with the straight S-box implementation.
 * len is unused; in/out must each point to at least 16 bytes.
 * Fix: cleanse the key-dependent state from the stack on exit, matching
 * CRYPT_AES_DecryptSbox — the original left the last-round state behind.
 */
void CRYPT_AES_EncryptSbox(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len)
{
    (void)len;
    uint32_t s[4] = {0};
    for (int32_t i = 0; i < 4; ++i) {
        s[i] = GET_UINT32_BE(in, 4 * i);
    }
    uint32_t nr = 0;
    AesAddRoundKey(s, ctx->key, nr);
    for (nr = 1; nr < ctx->rounds; ++nr) {
        AesSubBytes(s);
        AesShiftRows(s);
        AesMixColumns(s, true);
        AesAddRoundKey(s, ctx->key, nr);
    }
    /* Final round omits MixColumns. */
    AesSubBytes(s);
    AesShiftRows(s);
    AesAddRoundKey(s, ctx->key, nr);
    for (int32_t i = 0; i < 4; ++i) {
        PUT_UINT32_BE(s[i], out, 4 * i);
    }
    BSL_SAL_CleanseData(&s, 4 * sizeof(uint32_t));
}
/*
 * Inverse ShiftRows: each output word keeps its own top byte and pulls the
 * lower bytes from the preceding columns (wrapping) — the mirror of AesShiftRows.
 */
static void InvShiftRows(uint32_t *state)
{
    uint32_t shifted[4];
    for (int32_t c = 0; c < 4; ++c) {
        shifted[c] = (state[c] & 0xFF000000) | (state[(c + 3) & 3] & 0x00FF0000) |
                     (state[(c + 2) & 3] & 0x0000FF00) | (state[(c + 1) & 3] & 0x000000FF);
    }
    for (int32_t c = 0; c < 4; ++c) {
        state[c] = shifted[c];
    }
}
/* Inverse SubBytes: run every byte of the state through the inverse S-box. */
static void InvSubBytes(uint32_t *state)
{
    state[0] = SEARCH_INVSBOX(state[0]);
    state[1] = SEARCH_INVSBOX(state[1]);
    state[2] = SEARCH_INVSBOX(state[2]);
    state[3] = SEARCH_INVSBOX(state[3]);
}
// (addRound + InvShiftRow + InvSub) + 9/11/13 * (addRound + invMix + InvShiftRow + InvSub) + addRound
/*
 * Decrypt exactly one 16-byte block with the straight S-box implementation.
 * len is unused; in/out must each point to at least 16 bytes.
 */
void CRYPT_AES_DecryptSbox(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len)
{
    (void)len;
    uint32_t st[4] = {0};
    int32_t w;
    for (w = 0; w < 4; ++w) {
        st[w] = GET_UINT32_BE(in, 4 * w);
    }
    /* Undo the final encryption round (which had no MixColumns). */
    AesAddRoundKey(st, ctx->key, ctx->rounds);
    InvShiftRows(st);
    InvSubBytes(st);
    /* Walk the middle rounds in reverse order. */
    for (uint32_t nr = ctx->rounds - 1; nr > 0; --nr) {
        AesAddRoundKey(st, ctx->key, nr);
        AesMixColumns(st, false);
        InvShiftRows(st);
        InvSubBytes(st);
    }
    AesAddRoundKey(st, ctx->key, 0);
    for (w = 0; w < 4; ++w) {
        PUT_UINT32_BE(st[w], out, 4 * w);
    }
    /* Wipe key-dependent state from the stack. */
    BSL_SAL_CleanseData(&st, 4 * sizeof(uint32_t));
}
#endif /* HITLS_CRYPTO_AES && !HITLS_CRYPTO_AES_PRECALC_TABLES */ | 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/crypt_aes_sbox.c | C | unknown | 8,560 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CRYPT_AES_SBOX_H
#define CRYPT_AES_SBOX_H
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && !defined(HITLS_CRYPTO_AES_PRECALC_TABLES)
#include "crypt_aes.h"
/* Round-constant lookup for the key schedule; val is the 0-based Rcon index.
 * Parameter type aligned with the definition in crypt_aes_tbox.c (uint8_t);
 * the previous `int` declaration made the two function types incompatible. */
uint32_t RoundConstArray(uint8_t val);
/* Inverse S-box lookup used by the non-tbox decryption path. */
uint8_t InvSubSbox(uint8_t val);
/* Expand the raw key (keyLenBits wide) into ctx->key; ctx->rounds must be set first. */
void SetAesKeyExpansionSbox(CRYPT_AES_Key *ctx, uint32_t keyLenBits, const uint8_t *key);
/* Encrypt a single 16-byte block with the plain S-box implementation. */
void CRYPT_AES_EncryptSbox(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
/* Decrypt a single 16-byte block with the plain S-box implementation. */
void CRYPT_AES_DecryptSbox(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
#endif /* HITLS_CRYPTO_AES && !HITLS_CRYPTO_AES_PRECALC_TABLES */
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/crypt_aes_sbox.h | C | unknown | 1,189 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
#include "securec.h"
#include "bsl_err_internal.h"
#include "crypt_utils.h"
#include "crypt_errno.h"
#include "crypt_aes_local.h"
#include "bsl_sal.h"
int32_t CRYPT_AES_SetEncryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len)
{
if (ctx == NULL || key == NULL) {
BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
return CRYPT_NULL_INPUT;
}
if (len != 16) {
BSL_ERR_PUSH_ERROR(CRYPT_AES_ERR_KEYLEN);
return CRYPT_AES_ERR_KEYLEN;
}
SetEncryptKey128(ctx, key);
return CRYPT_SUCCESS;
}
int32_t CRYPT_AES_SetEncryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len)
{
if (ctx == NULL || key == NULL) {
BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
return CRYPT_NULL_INPUT;
}
if (len != 24) {
BSL_ERR_PUSH_ERROR(CRYPT_AES_ERR_KEYLEN);
return CRYPT_AES_ERR_KEYLEN;
}
SetEncryptKey192(ctx, key);
return CRYPT_SUCCESS;
}
int32_t CRYPT_AES_SetEncryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len)
{
if (ctx == NULL || key == NULL) {
BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
return CRYPT_NULL_INPUT;
}
if (len != 32) {
BSL_ERR_PUSH_ERROR(CRYPT_AES_ERR_KEYLEN);
return CRYPT_AES_ERR_KEYLEN;
}
SetEncryptKey256(ctx, key);
return CRYPT_SUCCESS;
}
int32_t CRYPT_AES_SetDecryptKey128(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len)
{
if (ctx == NULL || key == NULL) {
BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
return CRYPT_NULL_INPUT;
}
if (len != 16) {
BSL_ERR_PUSH_ERROR(CRYPT_AES_ERR_KEYLEN);
return CRYPT_AES_ERR_KEYLEN;
}
SetDecryptKey128(ctx, key);
return CRYPT_SUCCESS;
}
int32_t CRYPT_AES_SetDecryptKey192(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len)
{
if (ctx == NULL || key == NULL) {
BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
return CRYPT_NULL_INPUT;
}
if (len != 24) {
BSL_ERR_PUSH_ERROR(CRYPT_AES_ERR_KEYLEN);
return CRYPT_AES_ERR_KEYLEN;
}
SetDecryptKey192(ctx, key);
return CRYPT_SUCCESS;
}
int32_t CRYPT_AES_SetDecryptKey256(CRYPT_AES_Key *ctx, const uint8_t *key, uint32_t len)
{
if (ctx == NULL || key == NULL) {
BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
return CRYPT_NULL_INPUT;
}
if (len != 32) {
BSL_ERR_PUSH_ERROR(CRYPT_AES_ERR_KEYLEN);
return CRYPT_AES_ERR_KEYLEN;
}
SetDecryptKey256(ctx, key);
return CRYPT_SUCCESS;
}
/* Scrub all key material from the context; tolerates a NULL ctx. */
void CRYPT_AES_Clean(CRYPT_AES_Key *ctx)
{
    if (ctx != NULL) {
        BSL_SAL_CleanseData((void *)ctx, sizeof(*ctx));
    }
}
#endif /* HITLS_CRYPTO_AES */
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/crypt_aes_setkey.c | C | unknown | 3,263 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_AES
#include "securec.h"
#include "bsl_err_internal.h"
#include "crypt_utils.h"
#include "crypt_errno.h"
#include "bsl_sal.h"
#include "crypt_aes.h"
#include "crypt_aes_tbox.h"
/* FIPS-197 inverse S-box (InvSubBytes lookup table), indexed by byte value. */
static const uint8_t INV_S[256] = {
    0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U, 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
    0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U, 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
    0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU, 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
    0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U, 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
    0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U, 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
    0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU, 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
    0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU, 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
    0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U, 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
    0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU, 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
    0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U, 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
    0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U, 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
    0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U, 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
    0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U, 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
    0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU, 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
    0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U, 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
    0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U, 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
};
/* Key-schedule round constants Rcon[i] = x^i in GF(2^8), left-aligned in the word. */
static const uint32_t RCON[] = {
    0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000, 0x1B000000,
    0x36000000,
};
#ifndef HITLS_CRYPTO_AES_PRECALC_TABLES
/* Return round constant Rcon[val]; val is the 0-based index (no bounds check —
 * callers derive it from the key-expansion loop, which stays within RCON). */
uint32_t RoundConstArray(uint8_t val)
{
    return RCON[val];
}
/* Inverse S-box lookup exported to the non-tbox (plain S-box) decryption path. */
uint8_t InvSubSbox(uint8_t val)
{
    return INV_S[val];
}
#endif
#ifdef HITLS_CRYPTO_AES_PRECALC_TABLES
/* Combine one byte from each of four state words through the encryption
 * T-tables, selecting one byte lane from each table per the round structure. */
#define TESEARCH(t0, t1, t2, t3) \
    (((uint32_t)TE2[((t0) >> 24)] & 0xff000000) ^ ((uint32_t)TE3[((t1) >> 16) & 0xff] & 0x00ff0000) ^ \
    ((uint32_t)TE0[((t2) >> 8) & 0xff] & 0x0000ff00) ^ ((uint32_t)TE1[(t3) & 0xff] & 0x000000ff))
/* Inverse S-box applied per byte with InvShiftRows-style byte selection
 * (note the t3/t1 lane swap relative to TESEARCH). */
#define INVSSEARCH(t0, t1, t2, t3) \
    (((uint32_t)INV_S[((t0) >> 24)] << 24) ^ ((uint32_t)INV_S[((t3) >> 16) & 0xff] << 16) ^ \
    ((uint32_t)INV_S[((t2) >> 8) & 0xff] << 8) ^ ((uint32_t)INV_S[(t1) & 0xff]))
/* Encryption T-table 0: TE0[x] packs {2*S[x], S[x], S[x], 3*S[x]} (GF(2^8)
 * products) into one big-endian word, e.g. TE0[0] = 0xc66363a5 for S[0]=0x63. */
static const uint32_t TE0[256] = {
    0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
    0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
    0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
    0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
    0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
    0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
    0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
    0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
    0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
    0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
    0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
    0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
    0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
    0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
    0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
    0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
    0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
    0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
    0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
    0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
    0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
    0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
    0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
    0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
    0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
    0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
    0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
    0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
    0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
    0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
    0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
    0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
    0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
    0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
    0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
    0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
    0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
    0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
    0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
    0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
    0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
    0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
    0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
    0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
    0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
    0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
    0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
    0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
    0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
    0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
    0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
    0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
    0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
    0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
    0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
    0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
    0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
    0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
    0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
    0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
    0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
    0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
    0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
    0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
};
/* AES encryption T-table 1: each entry is TE0's entry rotated right by 8 bits,
 * so TE1[x] packs {3*S[x], 2*S[x], S[x], S[x]}? -- NOTE(review): byte order per
 * lane follows the rotation of TE0; the low byte of every entry is the plain
 * S-box value S[x] (e.g. TE1[0] = 0xa5c66363, low byte 0x63 = S[0]). */
static const uint32_t TE1[256] = {
    0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
    0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
    0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
    0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
    0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
    0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
    0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
    0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
    0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
    0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
    0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
    0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
    0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
    0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
    0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
    0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
    0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
    0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
    0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
    0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
    0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
    0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
    0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
    0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
    0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
    0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
    0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
    0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
    0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
    0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
    0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
    0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
    0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
    0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
    0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
    0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
    0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
    0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
    0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
    0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
    0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
    0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
    0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
    0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
    0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
    0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
    0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
    0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
    0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
    0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
    0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
    0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
    0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
    0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
    0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
    0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
    0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
    0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
    0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
    0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
    0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
    0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
    0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
    0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
};
/* AES encryption T-table 2: TE1 rotated right by a further 8 bits
 * (TE2[x] = rotr16(TE0[x])); the top byte of each entry is the plain
 * S-box value S[x] (e.g. TE2[0] = 0x63a5c663). */
static const uint32_t TE2[256] = {
    0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
    0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
    0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
    0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
    0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
    0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
    0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
    0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
    0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
    0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
    0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
    0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
    0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
    0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
    0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
    0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
    0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
    0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
    0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
    0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
    0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
    0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
    0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
    0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
    0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
    0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
    0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
    0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
    0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
    0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
    0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
    0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
    0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
    0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
    0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
    0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
    0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
    0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
    0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
    0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
    0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
    0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
    0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
    0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
    0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
    0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
    0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
    0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
    0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
    0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
    0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
    0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
    0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
    0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
    0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
    0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
    0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
    0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
    0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
    0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
    0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
    0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
    0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
    0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
};
/* AES encryption T-table 3: TE2 rotated right by a further 8 bits
 * (TE3[x] = rotr24(TE0[x])); bytes 3 and 2 of each entry are the plain
 * S-box value S[x] (e.g. TE3[0] = 0x6363a5c6). */
static const uint32_t TE3[256] = {
    0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
    0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
    0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
    0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
    0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
    0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
    0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
    0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
    0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
    0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
    0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
    0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
    0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
    0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
    0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
    0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
    0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
    0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
    0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
    0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
    0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
    0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
    0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
    0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
    0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
    0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
    0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
    0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
    0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
    0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
    0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
    0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
    0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
    0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
    0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
    0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
    0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
    0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
    0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
    0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
    0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
    0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
    0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
    0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
    0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
    0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
    0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
    0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
    0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
    0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
    0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
    0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
    0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
    0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
    0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
    0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
    0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
    0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
    0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
    0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
    0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
    0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
    0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
    0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
};
/* AES decryption T-table 0: TD0[x] packs the InvMixColumns products
 * {0e,09,0d,0b}*InvSbox[x] (note TD0[S[0]] = TD0[0x63] = 0), so indexing
 * with an S-box output applies InvMixColumns directly -- this is what the
 * decrypt key schedule in SetDecryptKeyTbox relies on. */
static const uint32_t TD0[256] = {
    0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
    0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
    0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
    0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
    0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
    0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
    0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
    0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
    0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
    0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
    0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
    0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
    0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
    0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
    0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
    0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
    0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
    0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
    0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
    0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
    0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
    0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
    0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
    0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
    0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
    0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
    0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
    0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
    0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
    0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
    0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
    0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
    0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
    0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
    0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
    0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
    0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
    0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
    0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
    0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
    0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
    0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
    0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
    0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
    0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
    0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
    0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
    0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
    0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
    0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
    0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
    0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
    0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
    0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
    0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
    0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
    0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
    0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
    0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
    0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
    0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
    0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
    0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
    0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
};
/* AES decryption T-table 1: TD0 rotated right by 8 bits
 * (TD1[x] = rotr8(TD0[x]), e.g. TD1[0] = 0x5051f4a7). */
static const uint32_t TD1[256] = {
    0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
    0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
    0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
    0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
    0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
    0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
    0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
    0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
    0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
    0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
    0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
    0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
    0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
    0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
    0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
    0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
    0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
    0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
    0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
    0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
    0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
    0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
    0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
    0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
    0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
    0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
    0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
    0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
    0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
    0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
    0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
    0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
    0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
    0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
    0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
    0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
    0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
    0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
    0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
    0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
    0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
    0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
    0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
    0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
    0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
    0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
    0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
    0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
    0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
    0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
    0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
    0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
    0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
    0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
    0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
    0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
    0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
    0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
    0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
    0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
    0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
    0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
    0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
    0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
};
/* AES decryption T-table 2: TD0 rotated right by 16 bits
 * (TD2[x] = rotr16(TD0[x]), e.g. TD2[0] = 0xa75051f4). */
static const uint32_t TD2[256] = {
    0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
    0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
    0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
    0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
    0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
    0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
    0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
    0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
    0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
    0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
    0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
    0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
    0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
    0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
    0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
    0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
    0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
    0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
    0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
    0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
    0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
    0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
    0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
    0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
    0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
    0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
    0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
    0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
    0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
    0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
    0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
    0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
    0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
    0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
    0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
    0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
    0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
    0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
    0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
    0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
    0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
    0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
    0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
    0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
    0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
    0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
    0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
    0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
    0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
    0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
    0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
    0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
    0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
    0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
    0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
    0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
    0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
    0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
    0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
    0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
    0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
    0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
    0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
    0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
};
/* AES decryption T-table 3: TD0 rotated right by 24 bits
 * (TD3[x] = rotr24(TD0[x]), e.g. TD3[0] = 0xf4a75051). */
static const uint32_t TD3[256] = {
    0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
    0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
    0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
    0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
    0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
    0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
    0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
    0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
    0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
    0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
    0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
    0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
    0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
    0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
    0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
    0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
    0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
    0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
    0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
    0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
    0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
    0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
    0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
    0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
    0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
    0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
    0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
    0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
    0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
    0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
    0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
    0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
    0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
    0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
    0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
    0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
    0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
    0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
    0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
    0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
    0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
    0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
    0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
    0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
    0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
    0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
    0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
    0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
    0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
    0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
    0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
    0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
    0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
    0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
    0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
    0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
    0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
    0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
    0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
    0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
    0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
    0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
    0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
    0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
};
/*
 * Rewrite the expanded encryption round keys in place for the equivalent
 * inverse cipher: every round key except the first and the last is passed
 * through InvMixColumns.  TE1[b] & 0xff yields the S-box value S[b], and
 * TD*[S[b]] is the InvMixColumns contribution of byte b, so the two lookups
 * compose to InvMixColumns applied directly to each key word.
 */
static void SetDecryptKeyTbox(CRYPT_AES_Key *ctx)
{
    uint32_t *rk = ctx->key;
    for (uint32_t round = 1; round < ctx->rounds; round++) {
        rk += 4; /* advance to the next 16-byte round key */
        for (uint32_t word = 0; word < 4; word++) {
            uint32_t w = rk[word];
            rk[word] = TD0[TE1[w >> 24] & 0xff] ^
                       TD1[TE1[(w >> 16) & 0xff] & 0xff] ^
                       TD2[TE1[(w >> 8) & 0xff] & 0xff] ^
                       TD3[TE1[w & 0xff] & 0xff];
        }
    }
}
/*
 * Key-schedule "g" function: SubWord(RotWord(w)) ^ rcon.
 * Each TE table holds the plain S-box value in a different byte lane,
 * so one masked lookup per lane assembles the substituted, rotated word
 * without a separate S-box table.  The masks are disjoint, so OR and XOR
 * are interchangeable here.
 */
static inline uint32_t AES_G(uint32_t w, uint32_t rcon)
{
    uint32_t sub = (TE2[(w >> 16) & 0xff] & 0xff000000U) | /* S[byte2] -> lane 3 */
                   (TE3[(w >> 8) & 0xff] & 0x00ff0000U) |  /* S[byte1] -> lane 2 */
                   (TE0[w & 0xff] & 0x0000ff00U) |         /* S[byte0] -> lane 1 */
                   (TE1[w >> 24] & 0x000000ffU);           /* S[byte3] -> lane 0 */
    return sub ^ rcon;
}
/*
 * Expand a 128-bit key into the 44-word (11 round keys) encryption schedule:
 * 4 words loaded big-endian from the key, then 10 expansion rounds of 4 words
 * each (4 + 10 * 4 = 44).
 */
void SetEncryptKey128Tbox(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    uint32_t *rk = ctx->key;
    rk[0] = GET_UINT32_BE(key, 0);  /* bytes 0..3 of the key */
    rk[1] = GET_UINT32_BE(key, 4);  /* bytes 4..7 */
    rk[2] = GET_UINT32_BE(key, 8);  /* bytes 8..11 */
    rk[3] = GET_UINT32_BE(key, 12); /* bytes 12..15 */
    for (uint32_t round = 0; round < 10; round++) { /* 10 expansion rounds */
        rk[4] = AES_G(rk[3], RCON[round]) ^ rk[0];
        rk[5] = rk[4] ^ rk[1];
        rk[6] = rk[5] ^ rk[2];
        rk[7] = rk[6] ^ rk[3];
        rk += 4; /* window slides one round key forward */
    }
}
/*
 * Expand a 192-bit key into the 52-word encryption schedule: 6 words loaded
 * big-endian from the key, then 8 expansion rounds of 6 words each, except
 * the last round which only needs 4 words (6 + 7 * 6 + 4 = 52).
 */
void SetEncryptKey192Tbox(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    uint32_t *rk = ctx->key;
    rk[0] = GET_UINT32_BE(key, 0);  /* bytes 0..3 of the key */
    rk[1] = GET_UINT32_BE(key, 4);  /* bytes 4..7 */
    rk[2] = GET_UINT32_BE(key, 8);  /* bytes 8..11 */
    rk[3] = GET_UINT32_BE(key, 12); /* bytes 12..15 */
    rk[4] = GET_UINT32_BE(key, 16); /* bytes 16..19 */
    rk[5] = GET_UINT32_BE(key, 20); /* bytes 20..23 */
    for (uint32_t round = 0; round < 8; round++) { /* 8 expansion rounds */
        rk[6] = AES_G(rk[5], RCON[round]) ^ rk[0];
        rk[7] = rk[6] ^ rk[1];
        rk[8] = rk[7] ^ rk[2];
        rk[9] = rk[8] ^ rk[3];
        if (round == 7) { /* final round: schedule is full after 4 words */
            break;
        }
        rk[10] = rk[9] ^ rk[4];
        rk[11] = rk[10] ^ rk[5];
        rk += 6; /* window slides one 6-word group forward */
    }
}
/*
 * Expand a 256-bit key into the 60-word encryption schedule: 8 words loaded
 * big-endian from the key, then 7 expansion rounds of 8 words each, except
 * the last round which only needs 4 words (8 + 6 * 8 + 4 = 60).
 */
void SetEncryptKey256Tbox(CRYPT_AES_Key *ctx, const uint8_t *key)
{
    uint32_t *rk = ctx->key;
    rk[0] = GET_UINT32_BE(key, 0);  /* bytes 0..3 of the key */
    rk[1] = GET_UINT32_BE(key, 4);  /* bytes 4..7 */
    rk[2] = GET_UINT32_BE(key, 8);  /* bytes 8..11 */
    rk[3] = GET_UINT32_BE(key, 12); /* bytes 12..15 */
    rk[4] = GET_UINT32_BE(key, 16); /* bytes 16..19 */
    rk[5] = GET_UINT32_BE(key, 20); /* bytes 20..23 */
    rk[6] = GET_UINT32_BE(key, 24); /* bytes 24..27 */
    rk[7] = GET_UINT32_BE(key, 28); /* bytes 28..31 */
    for (uint32_t round = 0; round < 7; round++) { /* 7 expansion rounds */
        rk[8] = AES_G(rk[7], RCON[round]) ^ rk[0];
        rk[9] = rk[8] ^ rk[1];
        rk[10] = rk[9] ^ rk[2];
        rk[11] = rk[10] ^ rk[3];
        if (round == 6) { /* final round: schedule is full after 4 words */
            break;
        }
        /* AES-256 applies SubWord without rotation mid-round; pre-rotating
           the input cancels AES_G's built-in RotWord. */
        uint32_t rotated = (rk[11] >> 8) | (rk[11] << 24);
        rk[12] = AES_G(rotated, 0) ^ rk[4];
        rk[13] = rk[12] ^ rk[5];
        rk[14] = rk[13] ^ rk[6];
        rk[15] = rk[14] ^ rk[7];
        rk += 8; /* window slides one 8-word group forward */
    }
}
/*
 * Build the T-box key schedule for the given key length (bits).
 * Unknown key lengths leave ctx untouched.  For decryption the encryption
 * schedule is built first and then transformed by SetDecryptKeyTbox.
 */
void SetAesKeyExpansionTbox(CRYPT_AES_Key *ctx, uint32_t keyLenBits, const uint8_t *key, bool isEncrypt)
{
    if (keyLenBits == CRYPT_AES_128) {
        SetEncryptKey128Tbox(ctx, key);
    } else if (keyLenBits == CRYPT_AES_192) {
        SetEncryptKey192Tbox(ctx, key);
    } else if (keyLenBits == CRYPT_AES_256) {
        SetEncryptKey256Tbox(ctx, key);
    } else {
        return; /* unsupported length: no schedule is produced */
    }
    if (!isEncrypt) {
        SetDecryptKeyTbox(ctx);
    }
}
/*
 * Load one 16-byte block big-endian into four state words r0..r3 and XOR in
 * the first round key (initial AddRoundKey).  `r` is a variable-name prefix
 * (expands to r0..r3); `enc` is a pointer-name prefix that expands to the
 * round-key pointer (`ekey` or `dkey`).
 */
#define AES_ROUND_INIT(in, r, enc) \
    do { \
        r##0 = GET_UINT32_BE(in, 0) ^ enc##key[0]; \
        r##1 = GET_UINT32_BE(in, 4) ^ enc##key[1]; \
        r##2 = GET_UINT32_BE(in, 8) ^ enc##key[2]; \
        r##3 = GET_UINT32_BE(in, 12) ^ enc##key[3]; \
    } while (0)
/*
 * One full AES encryption round (SubBytes + ShiftRows + MixColumns +
 * AddRoundKey) via four T-table lookups per output word.  `in` and `r` are
 * variable-name prefixes (reads in0..in3, writes r0..r3); `i` is the round
 * index, addressing round-key words (ekey)[4*i .. 4*i+3].
 */
#define AES_ENC_ROUND(in, r, i, ekey)                \
    do {                \
        r##0 = TE0[(in##0 >> 24)] ^ TE1[(in##1 >> 16) & 0xff] ^ TE2[(in##2 >> 8) & 0xff] ^ TE3[(in##3) & 0xff] \
            ^ (ekey)[((i) << 2) + 0];                \
        r##1 = TE0[(in##1 >> 24)] ^ TE1[(in##2 >> 16) & 0xff] ^ TE2[(in##3 >> 8) & 0xff] ^ TE3[(in##0) & 0xff] \
            ^ (ekey)[((i) << 2) + 1];                \
        r##2 = TE0[(in##2 >> 24)] ^ TE1[(in##3 >> 16) & 0xff] ^ TE2[(in##0 >> 8) & 0xff] ^ TE3[(in##1) & 0xff] \
            ^ (ekey)[((i) << 2) + 2];                \
        r##3 = TE0[(in##3 >> 24)] ^ TE1[(in##0 >> 16) & 0xff] ^ TE2[(in##1 >> 8) & 0xff] ^ TE3[(in##2) & 0xff] \
            ^ (ekey)[((i) << 2) + 3];                \
    } while (0)
/*
 * Encrypt exactly one 16-byte block with the T-table implementation.
 * `len` is accepted for interface uniformity but ignored -- the caller is
 * expected to pass a single block.  Rounds 1..9 alternate the c and t
 * register sets; two extra round pairs run for AES-192 (12 rounds) and two
 * more for AES-256 (14 rounds), so the state always ends in t0..t3 before
 * the final round.  TESEARCH (defined elsewhere in this file) performs the
 * last round's ShiftRows + SubBytes without MixColumns.
 */
void CRYPT_AES_EncryptTbox(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len)
{
    (void)len;
    const uint32_t *ekey = ctx->key;
    uint32_t c0, c1, c2, c3, t0, t1, t2, t3;
    /* Initial AddRoundKey: load the block and XOR round key 0. */
    AES_ROUND_INIT(in, c, e);
    AES_ENC_ROUND(c, t, 1, ekey);
    AES_ENC_ROUND(t, c, 2, ekey);
    AES_ENC_ROUND(c, t, 3, ekey);
    AES_ENC_ROUND(t, c, 4, ekey);
    AES_ENC_ROUND(c, t, 5, ekey);
    AES_ENC_ROUND(t, c, 6, ekey);
    AES_ENC_ROUND(c, t, 7, ekey);
    AES_ENC_ROUND(t, c, 8, ekey);
    AES_ENC_ROUND(c, t, 9, ekey);
    if (ctx->rounds > 10) { // AES192/AES256 Performs 10th and 11th rounds of calculation.
        AES_ENC_ROUND(t, c, 10, ekey);
        AES_ENC_ROUND(c, t, 11, ekey);
    }
    if (ctx->rounds > 12) { // AES256 Performs 12th and 13th rounds of calculation.
        AES_ENC_ROUND(t, c, 12, ekey);
        AES_ENC_ROUND(c, t, 13, ekey);
    }
    /* In the last round, the column confusion is not performed.
       Instead, the shift is performed and the s-box is searched. */
    // Do the position of the last ekey calculation, which is the ekey that has been used 4*rounds.
    ekey += ctx->rounds * 4;
    c0 = TESEARCH(t0, t1, t2, t3) ^ ekey[0]; // operation ekey 0
    c1 = TESEARCH(t1, t2, t3, t0) ^ ekey[1]; // operation ekey 1
    c2 = TESEARCH(t2, t3, t0, t1) ^ ekey[2]; // operation ekey 2
    c3 = TESEARCH(t3, t0, t1, t2) ^ ekey[3]; // operation ekey 3
    PUT_UINT32_BE(c0, out, 0); // c0 is converted into four bytes which index is 0 in the out.
    PUT_UINT32_BE(c1, out, 4); // c1 is converted into four bytes which index is 4 in the out.
    PUT_UINT32_BE(c2, out, 8); // c2 is converted into four bytes which index is 8 in the out
    PUT_UINT32_BE(c3, out, 12); // c3 is converted into four bytes which index is 12 in the out
}
/*
 * One full AES decryption round via inverse T-box lookups (TD0..TD3), which
 * fold InvShiftRows + InvSubBytes + InvMixColumns into the tables.
 * `in` and `r` are the prefixes of the four 32-bit state words (token-pasted
 * to in##0..in##3 / r##0..r##3); `i` selects the round-key word group
 * dkey[4*i .. 4*i+3]. Wrapped in do/while(0) so the multi-statement macro
 * behaves as a single statement.
 */
#define AES_DEC_ROUND(in, r, i, dkey) \
    do { \
        r##0 = TD0[(in##0 >> 24)] ^ TD1[(in##3 >> 16) & 0xff] ^ TD2[(in##2 >> 8) & 0xff] ^ TD3[(in##1) & 0xff] \
            ^ (dkey)[((i) << 2) + 0]; \
        r##1 = TD0[(in##1 >> 24)] ^ TD1[(in##0 >> 16) & 0xff] ^ TD2[(in##3 >> 8) & 0xff] ^ TD3[(in##2) & 0xff] \
            ^ (dkey)[((i) << 2) + 1]; \
        r##2 = TD0[(in##2 >> 24)] ^ TD1[(in##1 >> 16) & 0xff] ^ TD2[(in##0 >> 8) & 0xff] ^ TD3[(in##3) & 0xff] \
            ^ (dkey)[((i) << 2) + 2]; \
        r##3 = TD0[(in##3 >> 24)] ^ TD1[(in##2 >> 16) & 0xff] ^ TD2[(in##1 >> 8) & 0xff] ^ TD3[(in##0) & 0xff] \
            ^ (dkey)[((i) << 2) + 3]; \
    } while (0)
/*
 * Decrypt a single 16-byte block using the precomputed inverse T-box tables.
 *
 * @param ctx AES key context: ctx->key holds the expanded round keys,
 *            ctx->rounds the round count (10/12/14 for AES-128/192/256).
 * @param in  16-byte ciphertext block.
 * @param out 16-byte plaintext block.
 * @param len Unused; the caller is expected to pass exactly one block.
 *
 * The recovered plaintext words p0..p3 are cleansed before returning.
 * NOTE(review): the intermediate words t0..t3 are not cleansed — confirm
 * whether that is intentional.
 */
void CRYPT_AES_DecryptTbox(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len)
{
    (void)len;
    const uint32_t *dkey = ctx->key + ctx->rounds * 4; // last group: 4 key words per round
    uint32_t p0, p1, p2, p3, t0, t1, t2, t3;
    // Initialize p0..p3 from the input block; decryption starts with the
    // round keys at the end of the schedule.
    AES_ROUND_INIT(in, p, d);
    dkey = ctx->key; // subsequent rounds index the schedule from the start
    if (ctx->rounds > 12) { // AES256 Performs 12th and 13th rounds of calculation.
        AES_DEC_ROUND(p, t, 13, dkey);
        AES_DEC_ROUND(t, p, 12, dkey);
    }
    if (ctx->rounds > 10) { // AES192 Performs 10th and 11th rounds of calculation.
        AES_DEC_ROUND(p, t, 11, dkey);
        AES_DEC_ROUND(t, p, 10, dkey);
    }
    /* Rounds 9..1 are common to all AES key sizes. */
    AES_DEC_ROUND(p, t, 9, dkey);
    AES_DEC_ROUND(t, p, 8, dkey);
    AES_DEC_ROUND(p, t, 7, dkey);
    AES_DEC_ROUND(t, p, 6, dkey);
    AES_DEC_ROUND(p, t, 5, dkey);
    AES_DEC_ROUND(t, p, 4, dkey);
    AES_DEC_ROUND(p, t, 3, dkey);
    AES_DEC_ROUND(t, p, 2, dkey);
    AES_DEC_ROUND(p, t, 1, dkey);
    /* In the last round, the column confusion (InvMixColumns) is not performed.
       Instead, the shift is directly performed and the inverse s-box is searched. */
    p0 = INVSSEARCH(t0, t1, t2, t3) ^ dkey[0]; // dkey 0
    p1 = INVSSEARCH(t1, t2, t3, t0) ^ dkey[1]; // dkey 1
    p2 = INVSSEARCH(t2, t3, t0, t1) ^ dkey[2]; // dkey 2
    p3 = INVSSEARCH(t3, t0, t1, t2) ^ dkey[3]; // dkey 3
    PUT_UINT32_BE(p0, out, 0);  // p0 is written big-endian at out[0..3]
    PUT_UINT32_BE(p1, out, 4);  // p1 is written big-endian at out[4..7]
    PUT_UINT32_BE(p2, out, 8);  // p2 is written big-endian at out[8..11]
    PUT_UINT32_BE(p3, out, 12); // p3 is written big-endian at out[12..15]
    // Scrub the plaintext words from the stack.
    BSL_SAL_CleanseData(&p0, sizeof(uint32_t));
    BSL_SAL_CleanseData(&p1, sizeof(uint32_t));
    BSL_SAL_CleanseData(&p2, sizeof(uint32_t));
    BSL_SAL_CleanseData(&p3, sizeof(uint32_t));
}
#endif /* HITLS_CRYPTO_AES_PRECALC_TABLES */
#endif /* HITLS_CRYPTO_AES */
| 2302_82127028/openHiTLS-examples_1508 | crypto/aes/src/crypt_aes_tbox.c | C | unknown | 44,548 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CRYPT_AES_TBOX_H
#define CRYPT_AES_TBOX_H
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_AES) && defined(HITLS_CRYPTO_AES_PRECALC_TABLES)
#include "crypt_aes.h"
/**
 * @brief Expand a raw AES key into round keys for the T-box implementation.
 *
 * @param ctx        [OUT] Key context receiving the expanded round keys.
 * @param keyLenBits [IN]  Key length in bits (128/192/256).
 * @param key        [IN]  Raw key bytes.
 * @param isEncrypt  [IN]  true for the encryption schedule, false for decryption.
 */
void SetAesKeyExpansionTbox(CRYPT_AES_Key *ctx, uint32_t keyLenBits, const uint8_t *key, bool isEncrypt);
/**
 * @brief Encrypt one 16-byte block using precomputed lookup tables.
 *
 * @param ctx [IN]  Key context produced by SetAesKeyExpansionTbox.
 * @param in  [IN]  16-byte plaintext block.
 * @param out [OUT] 16-byte ciphertext block.
 * @param len [IN]  Unused; the caller passes exactly one block.
 */
void CRYPT_AES_EncryptTbox(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
/**
 * @brief Decrypt one 16-byte block using precomputed inverse lookup tables.
 *
 * @param ctx [IN]  Key context produced by SetAesKeyExpansionTbox.
 * @param in  [IN]  16-byte ciphertext block.
 * @param out [OUT] 16-byte plaintext block.
 * @param len [IN]  Unused; the caller passes exactly one block.
 */
void CRYPT_AES_DecryptTbox(const CRYPT_AES_Key *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
#endif /* HITLS_CRYPTO_AES_PRECALC_TABLES */
#endif /* CRYPT_AES_TBOX_H */
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CRYPT_BN_H
#define CRYPT_BN_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Limb (machine-word) configuration: BN_UINT is the basic digit type of a
 * BigNum. BN_DEC_VAL is the largest power of 10 that fits in one limb and
 * BN_DEC_LEN its digit count; both drive decimal string conversion. */
#if defined(HITLS_SIXTY_FOUR_BITS)
#define BN_UINT uint64_t
#define BN_MASK (0xffffffffffffffffL)
#define BN_DEC_VAL (10000000000000000000ULL)
#define BN_DEC_LEN 19
#define BN_UNIT_BITS 64
#elif defined(HITLS_THIRTY_TWO_BITS)
#define BN_UINT uint32_t
#define BN_MASK (0xffffffffL)
#define BN_DEC_VAL (1000000000L)
#define BN_DEC_LEN 9
#define BN_UNIT_BITS 32
#else
#error BN_UINT MUST be defined first.
#endif
#define BN_MAX_BITS (1u << 29) /* @note: BN_BigNum bits limitation 2^29 bits */
#define BN_BITS_TO_BYTES(n) (((n) + 7) >> 3) /* @note: Calculate bytes from bits, bytes = (bits + 7) >> 3 */
#define BN_BYTES_TO_BITS(n) ((n) << 3) /* bits = bytes * 8 = bytes << 3 */
#define BN_UINT_BITS ((uint32_t)sizeof(BN_UINT) << 3) /* bits per limb */
#define BITS_TO_BN_UNIT(bits) (((bits) + BN_UINT_BITS - 1) / BN_UINT_BITS) /* limbs needed for 'bits', rounded up */
/* Flags of a BigNum. Each flag is a distinct bit (0x01, 0x02, 0x04, ...) so
 * values can be OR-ed together; a new flag doubles the previous value. */
typedef enum {
    CRYPT_BN_FLAG_OPTIMIZER = 0x01, /**< Flag of BigNum, indicating the BigNum obtained from the optimizer */
    CRYPT_BN_FLAG_STATIC = 0x02, /**< Flag of BigNum, indicating the BN memory management belongs to the user. */
    CRYPT_BN_FLAG_CONSTTIME = 0x04, /**< Flag of BigNum, indicating the constant time execution. */
} CRYPT_BN_FLAG;
/* Arbitrary-precision integer: sign-magnitude, stored as an array of BN_UINT
 * limbs with the most significant limb at the largest index. */
typedef struct BigNum {
    bool sign; /**< bignum sign: negative(true) or not(false) */
    uint32_t size; /**< bignum size (count of BN_UINT) */
    uint32_t room; /**< bignum max size (count of BN_UINT) */
    uint32_t flag; /**< bignum flag (see CRYPT_BN_FLAG) */
    BN_UINT *data; /**< bignum data chunk (most significant limb at the largest index) */
} BN_BigNum;
/* Montgomery context (opaque; defined in the implementation). */
typedef struct BnMont BN_Mont;
/* Scratch-memory optimizer used by BN arithmetic (opaque). */
typedef struct BnOptimizer BN_Optimizer;
/* Callback context for long-running BN operations (opaque). */
typedef struct BnCbCtx BN_CbCtx;
typedef int32_t (*BN_CallBack)(BN_CbCtx *, int32_t, int32_t);
/* Constant-time zero test: returns all-ones (all Fs) if a == 0, otherwise 0.
 * Branch-free, so execution time does not depend on the value of a. */
static inline BN_UINT BN_IsZeroUintConsttime(BN_UINT a)
{
    // For a == 0: ~a is all-ones and a - 1 wraps to all-ones, so the MSB is set.
    // For a != 0: either ~a or a - 1 has a clear MSB, so the MSB of t is 0.
    BN_UINT t = ~a & (a - 1); // The most significant bit of t is 1 only when a == 0.
    // Shifting 3 bits to the left is equivalent to multiplying 8, convert the number of bytes into the number of bits.
    // 0 - msb(t) expands that single bit into all-ones (msb == 1) or zero (msb == 0).
    return (BN_UINT)0 - (t >> (((uint32_t)sizeof(BN_UINT) << 3) - 1));
}
#ifdef HITLS_CRYPTO_EAL_BN
/* Check whether the BN entered externally is valid. */
bool BnVaild(const BN_BigNum *a);
#endif
/**
* @ingroup bn
* @brief BigNum creation
*
* @param bits [IN] Number of bits
*
* @retval not-NULL Success
* @retval NULL fail
*/
BN_BigNum *BN_Create(uint32_t bits);
/**
* @ingroup bn
* @brief BigNum Destruction
*
* @param a [IN] BigNum
*
* @retval none
*/
void BN_Destroy(BN_BigNum *a);
/**
* @ingroup bn
* @brief BN initialization
* @attention This interface is used to create the BN structure between modules. The BN does not manage the memory of
the external BN structure and internal data space. the interface only the fixed attributes such as data,
room, and flag. The size attribute is defined by the caller.
*
* @param bn [IN/OUT] BN, which is created by users and is not managed by the BN.
* @param data [IN] BN data, the memory is allocated by the user and is not managed by the BN.
* @param number [IN] number of BN that need to be initialized.
*
* @retval void
*/
void BN_Init(BN_BigNum *bn, BN_UINT *data, uint32_t room, int32_t number);
#ifdef HITLS_CRYPTO_BN_CB
/**
* @ingroup bn
* @brief BigNum callback creation
*
* @param none
*
* @retval not-NULL Success
* @retval NULL fail
*/
BN_CbCtx *BN_CbCtxCreate(void);
/**
* @ingroup bn
* @brief BigNum callback configuration
*
* @param gencb [out] Callback
* @param callBack [in] Callback API
* @param arg [in] Callback parameters
*
* @retval none
*/
void BN_CbCtxSet(BN_CbCtx *gencb, BN_CallBack callBack, void *arg);
/**
* @ingroup bn
* @brief Invoke the callback.
*
* @param callBack [out] Callback
* @param process [in] Parameter
* @param target [in] Parameter
* @retval CRYPT_SUCCESS succeeded
* @retval other determined by the callback function
*/
int32_t BN_CbCtxCall(BN_CbCtx *callBack, int32_t process, int32_t target);
/**
* @ingroup bn
* @brief Obtain the arg parameter in the callback.
*
* @param callBack [in] Callback
* @retval void* NULL or callback parameter.
*/
void *BN_CbCtxGetArg(BN_CbCtx *callBack);
/**
* @ingroup bn
* @brief Callback release
*
* @param cb [in] Callback
*
* @retval none
*/
void BN_CbCtxDestroy(BN_CbCtx *cb);
#endif
/**
* @ingroup bn
 * @brief Set the sign of a BigNum.
 *
 * @param a [IN] BigNum
 * @param sign [IN] Sign: true indicates a negative number, false indicates a non-negative number.
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_BN_NO_NEGATOR_ZERO 0 cannot be set to a negative sign.
*/
int32_t BN_SetSign(BN_BigNum *a, bool sign);
/**
* @ingroup bn
* @brief Set the flag.
*
* @param a [IN] BigNum
* @param flag [IN] flag, for example, BN_MARK_CONSTTIME indicates that the constant interface is used.
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_BN_FLAG_INVALID Invalid BigNum flag.
*/
int32_t BN_SetFlag(BN_BigNum *a, uint32_t flag);
/**
* @ingroup bn
* @brief BigNum copy
*
* @param r [OUT] BigNum
* @param a [IN] BigNum
*
* @retval CRYPT_SUCCESS succeeded.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_Copy(BN_BigNum *r, const BN_BigNum *a);
/**
* @ingroup bn
* @brief Generate a BigNum with the same content.
*
* @param a [IN] BigNum
*
* @retval Not NULL Success
* @retval NULL failure
*/
BN_BigNum *BN_Dup(const BN_BigNum *a);
/**
* @ingroup bn
* @brief Check whether the value of a BigNum is 0.
*
* @attention The input parameter cannot be null.
* @param a [IN] BigNum
*
* @retval true. The value of a BigNum is 0.
* @retval false. The value of a BigNum is not 0.
* @retval other: indicates that the input parameter is abnormal.
*
*/
bool BN_IsZero(const BN_BigNum *a);
/**
* @ingroup bn
* @brief Check whether the value of a BigNum is 1.
*
* @attention The input parameter cannot be null.
* @param a [IN] BigNum
*
* @retval true. The value of a BigNum is 1.
* @retval false. The value of a BigNum is not 1.
* @retval other: indicates that the input parameter is abnormal.
*
*/
bool BN_IsOne(const BN_BigNum *a);
/**
* @ingroup bn
* @brief Check whether a BigNum is a negative number.
*
* @attention The input parameter cannot be null.
* @param a [IN] BigNum
*
* @retval true. The value of a BigNum is a negative number.
* @retval false. The value of a BigNum is not a negative number.
*
*/
bool BN_IsNegative(const BN_BigNum *a);
/**
* @ingroup bn
* @brief Check whether the value of a BigNum is an odd number.
*
* @attention The input parameter cannot be null.
* @param a [IN] BigNum
*
* @retval true. The value of a BigNum is an odd number.
* @retval false. The value of a BigNum is not an odd number.
* @retval other: indicates that the input parameter is abnormal.
*
*/
bool BN_IsOdd(const BN_BigNum *a);
/**
* @ingroup bn
* @brief Check whether the flag of a BigNum meets the expected flag.
*
* @param a [IN] BigNum
* @param flag [IN] Flag. For example, BN_MARK_CONSTTIME indicates that the constant interface is used.
*
* @retval true, invalid null pointer
* @retval false, 0 cannot be set to a negative number.
* @retval other: indicates that the input parameter is abnormal.
*/
bool BN_IsFlag(const BN_BigNum *a, uint32_t flag);
/**
* @ingroup bn
* @brief Set the value of a BigNum to 0.
*
* @param a [IN] BigNum
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval other: indicates that the input parameter is abnormal.
*/
int32_t BN_Zeroize(BN_BigNum *a);
/**
* @ingroup bn
* @brief Compare whether the value of BigNum a is the target limb w.
*
* @attention The input parameter cannot be null.
* @param a [IN] BigNum
* @param w [IN] Limb
*
* @retval true: equal
* @retval false, not equal
* @retval other: indicates that the input parameter is abnormal.
*/
bool BN_IsLimb(const BN_BigNum *a, const BN_UINT w);
/**
* @ingroup bn
* @brief Set a limb to the BigNum.
*
* @param a [IN] BigNum
* @param w [IN] Limb
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_SetLimb(BN_BigNum *r, BN_UINT w);
/**
* @ingroup bn
* @brief Obtain the limb from the BigNum.
*
* @param a [IN] BigNum
*
* @retval 0 Get 0
* @retval BN_MASK Obtain the mask.
* @retval others The limb is obtained successfully.
*/
BN_UINT BN_GetLimb(const BN_BigNum *a);
/**
* @ingroup bn
* @brief Obtain the value of the bit corresponding to a BigNum. The value is 1 or 0.
*
* @attention The input parameter of a BigNum cannot be null.
* @param a [IN] BigNum
* @param n [IN] Number of bits
*
* @retval true. The corresponding bit is 1.
* @retval false. The corresponding bit is 0.
*
*/
bool BN_GetBit(const BN_BigNum *a, uint32_t n);
/**
* @ingroup bn
* @brief Set the bit corresponding to the BigNum to 1.
*
* @param a [IN] BigNum
* @param n [IN] Number of bits
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_BN_SPACE_NOT_ENOUGH The space is insufficient.
*/
int32_t BN_SetBit(BN_BigNum *a, uint32_t n);
/**
* @ingroup bn
* @brief Clear the bit corresponding to the BigNum to 0.
*
* @param a [IN] BigNum
* @param n [IN] Number of bits
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_BN_SPACE_NOT_ENOUGH The space is insufficient.
*/
int32_t BN_ClrBit(BN_BigNum *a, uint32_t n);
/**
* @ingroup bn
* @brief Truncate a BigNum from the corresponding bit.
*
* @param a [IN] BigNum
* @param n [IN] Number of bits
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_BN_SPACE_NOT_ENOUGH The space is insufficient.
*/
int32_t BN_MaskBit(BN_BigNum *a, uint32_t n);
/**
* @ingroup bn
* @brief Obtain the valid bit length of a BigNum.
*
* @attention The input parameter of a BigNum cannot be null.
* @param a [IN] BigNum
*
* @retval uint32_t, valid bit length
*/
uint32_t BN_Bits(const BN_BigNum *a);
/**
* @ingroup bn
* @brief Obtain the valid byte length of a BigNum.
*
* @attention The large input parameter cannot be a null pointer.
* @param a [IN] BigNum
*
* @retval uint32_t, valid byte length of a BigNum
*/
uint32_t BN_Bytes(const BN_BigNum *a);
/**
* @ingroup bn
* @brief BigNum Calculate the greatest common divisor
* @par Description: gcd(a, b) (a, b!=0)
*
* @param r [OUT] greatest common divisor
* @param a [IN] BigNum
* @param b [IN] BigNum
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_GCD_NO_ZERO The greatest common divisor cannot be 0.
*/
int32_t BN_Gcd(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum modulo inverse
*
* @param r [OUT] Result
* @param x [IN] BigNum
* @param m [IN] mod
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_NO_INVERSE Cannot calculate the module inverse.
*/
int32_t BN_ModInv(BN_BigNum *r, const BN_BigNum *x, const BN_BigNum *m, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum comparison
*
* @attention The input parameter of a BigNum cannot be null.
* @param a [IN] BigNum
* @param b [IN] BigNum
*
* @retval 0,a == b
* @retval 1,a > b
* @retval -1,a < b
*/
int32_t BN_Cmp(const BN_BigNum *a, const BN_BigNum *b);
/**
* @ingroup bn
* @brief BigNum Addition
*
* @param r [OUT] and
* @param a [IN] Addendum
* @param b [IN] Addendum
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_Add(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b);
/**
* @ingroup bn
* @brief BigNum plus limb
*
* @param r [OUT] and
* @param a [IN] Addendum
* @param w [IN] Addendum
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_AddLimb(BN_BigNum *r, const BN_BigNum *a, BN_UINT w);
/**
* @ingroup bn
* @brief subtraction of large numbers
*
* @param r [OUT] difference
* @param a [IN] minuend
* @param b [IN] subtrahend
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_Sub(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b);
/**
* @ingroup bn
* @brief BigNum minus limb
*
* @param r [OUT] difference
* @param a [IN] minuend
* @param w [IN] subtrahend
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_SubLimb(BN_BigNum *r, const BN_BigNum *a, BN_UINT w);
/**
* @ingroup bn
* @brief BigNum Multiplication
*
* @param r [OUT] product
* @param a [IN] multiplier
* @param b [IN] multiplier
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
*/
int32_t BN_Mul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Multiplication of BigNum by Limb
*
* @param r [OUT] product
* @param a [IN] multiplicand
* @param w [IN] multiplier (limb)
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_MulLimb(BN_BigNum *r, const BN_BigNum *a, const BN_UINT w);
/**
* @ingroup bn
* @brief BigNum square. r must not be a.
*
* @param r [OUT] product
* @param a [IN] multiplier
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
*/
int32_t BN_Sqr(BN_BigNum *r, const BN_BigNum *a, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum Division
*
* @param q [OUT] quotient
* @param r [OUT] remainder
* @param x [IN] dividend
* @param y [IN] divisor
* @param opt [IN] optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_INVALID_ARG The addresses of q, r are identical, or both of them are null.
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_DIVISOR_ZERO divisor cannot be 0.
*/
int32_t BN_Div(BN_BigNum *q, BN_BigNum *r, const BN_BigNum *x, const BN_BigNum *y, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum divided by limb
*
* @param q [OUT] quotient
* @param r [OUT] remainder
* @param x [IN] dividend
* @param y [IN] Divisor (limb)
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_ERR_DIVISOR_ZERO divisor cannot be 0.
*/
int32_t BN_DivLimb(BN_BigNum *q, BN_UINT *r, const BN_BigNum *x, const BN_UINT y);
/**
* @ingroup bn
* @brief BigNum Modular addition
* @par Description: r = (a + b) mod (mod)
*
* @param r [OUT] Modulus result
* @param a [IN] BigNum
* @param b [IN] BigNum
* @param mod [IN] mod
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_DIVISOR_ZERO module cannot be 0.
*/
int32_t BN_ModAdd(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
const BN_BigNum *mod, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum Modular subtraction
* @par Description: r = (a - b) mod (mod)
*
* @param r [OUT] Modulo result
* @param a [IN] minuend
* @param b [IN] subtrahend
* @param mod [IN] mod
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_DIVISOR_ZERO module cannot be 0.
*/
int32_t BN_ModSub(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
const BN_BigNum *mod, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum Modular multiplication
* @par Description: r = (a * b) mod (mod)
*
* @param r [OUT] Modulus result
* @param a [IN] BigNum
* @param b [IN] BigNum
* @param mod [IN] mod
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_DIVISOR_ZERO module cannot be 0.
*/
int32_t BN_ModMul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
const BN_BigNum *mod, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum Modular squared
* @par Description: r = (a ^ 2) mod (mod)
*
* @param r [OUT] Modulus result
* @param a [IN] BigNum
* @param mod [IN] mod
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_DIVISOR_ZERO module cannot be 0.
*/
int32_t BN_ModSqr(
BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *mod, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum Modular power
* @par Description: r = (a ^ e) mod (mod)
*
* @param r [OUT] Modulus result
* @param a [IN] BigNum
* @param mod [IN] mod
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_DIVISOR_ZERO module cannot be 0.
* @retval CRYPT_BN_ERR_EXP_NO_NEGATIVE exponent cannot be a negative number
*/
int32_t BN_ModExp(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e,
const BN_BigNum *m, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum modulo
* @par Description: r = a mod m
*
* @param r [OUT] Modulus result
* @param a [IN] BigNum
* @param m [IN] mod
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_ERR_DIVISOR_ZERO module cannot be 0.
*/
int32_t BN_Mod(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum modulo limb
* @par Description: r = a mod m
*
* @param r [OUT] Modulus result
* @param a [IN] BigNum
* @param m [IN] Modulus (limb)
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_ERR_DIVISOR_ZERO module cannot be 0.
*/
int32_t BN_ModLimb(BN_UINT *r, const BN_BigNum *a, const BN_UINT m);
#ifdef HITLS_CRYPTO_BN_PRIME
/**
* @ingroup bn
* @brief generate BN prime
*
* @param r [OUT] Generate a prime number.
* @param e [OUT] A helper prime to reduce the number of Miller-Rabin primes check.
* @param bits [IN] Length of the generated prime number
* @param half [IN] Whether to generate a prime number greater than the maximum value of this prime number by 1/2:
* Yes: True, No: false
* @param opt [IN] Optimizer
* @param cb [IN] BigNum callback
* @retval CRYPT_SUCCESS The prime number is successfully generated.
* @retval CRYPT_NULL_INPUT Invalid null pointer.
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_STACK_FULL The optimizer stack is full.
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_NOR_GEN_PRIME Failed to generate prime numbers.
* @retval CRYPT_NO_REGIST_RAND No random number is registered.
* @retval CRYPT_BN_RAND_GEN_FAIL Failed to generate a random number.
*/
int32_t BN_GenPrime(BN_BigNum *r, BN_BigNum *e, uint32_t bits, bool half, BN_Optimizer *opt, BN_CbCtx *cb);
/**
* @ingroup bn
* @brief check prime number
*
* @param bn [IN] Prime number to be checked
* @param checkTimes [IN] the user can set the check times of miller-rabin testing.
* if checkTimes == 0, it will use the default detection times of miller-rabin.
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS The check result is a prime number.
* @retval CRYPT_BN_NOR_CHECK_PRIME The check result is a non-prime number.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_BN_OPTIMIZER_STACK_FULL The optimizer stack is full.
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_NO_REGIST_RAND No random number is registered.
* @retval CRYPT_BN_RAND_GEN_FAIL Failed to generate a random number.
*/
int32_t BN_PrimeCheck(const BN_BigNum *bn, uint32_t checkTimes, BN_Optimizer *opt, BN_CbCtx *cb);
#endif // HITLS_CRYPTO_BN_PRIME
#ifdef HITLS_CRYPTO_BN_RAND
#define BN_RAND_TOP_NOBIT 0 /* Not set bits */
#define BN_RAND_TOP_ONEBIT 1 /* Set the most significant bit to 1. */
#define BN_RAND_TOP_TWOBIT 2 /* Set the highest two bits to 1 */
#define BN_RAND_BOTTOM_NOBIT 0 /* Not set bits */
#define BN_RAND_BOTTOM_ONEBIT 1 /* Set the least significant bit to 1. */
#define BN_RAND_BOTTOM_TWOBIT 2 /* Set the least significant two bits to 1. */
/**
* @ingroup bn
* @brief generate random BigNum
*
* @param r [OUT] Generate a random number.
* @param bits [IN] Length of the generated prime number
* @param top [IN] Generating the flag indicating whether to set the most significant bit of a random number
* @param bottom [IN] Generate the flag indicating whether to set the least significant bit of the random number.
*
* @retval CRYPT_SUCCESS A random number is generated successfully.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_ERR_RAND_TOP_BOTTOM The top or bottom is invalid during random number generation.
* @retval CRYPT_NO_REGIST_RAND No random number is registered.
* @retval CRYPT_BN_RAND_GEN_FAIL Failed to generate a random number.
* @retval CRYPT_BN_ERR_RAND_BITS_NOT_ENOUGH The bit is too small during random number generation.
*/
int32_t BN_Rand(BN_BigNum *r, uint32_t bits, uint32_t top, uint32_t bottom);
/**
* @ingroup bn
* @brief generate random BigNum
*
* @param libCtx [IN] provider libCtx
* @param r [OUT] Generate a random number.
* @param bits [IN] Length of the generated prime number
* @param top [IN] Generating the flag indicating whether to set the most significant bit of a random number
* @param bottom [IN] Generate the flag indicating whether to set the least significant bit of the random number.
*
* @retval CRYPT_SUCCESS A random number is generated successfully.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_ERR_RAND_TOP_BOTTOM The top or bottom is invalid during random number generation.
* @retval CRYPT_BN_RAND_GEN_FAIL Failed to generate a random number.
* @retval CRYPT_BN_ERR_RAND_BITS_NOT_ENOUGH The bit is too small during random number generation.
*/
int32_t BN_RandEx(void *libCtx, BN_BigNum *r, uint32_t bits, uint32_t top, uint32_t bottom);
/**
* @ingroup bn
* @brief generate random BigNum
*
* @param r [OUT] Generate a random number.
* @param p [IN] Compare data so that the generated r < p
*
* @retval CRYPT_SUCCESS A random number is successfully generated.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_NO_REGIST_RAND No random number is registered.
* @retval CRYPT_BN_RAND_GEN_FAIL Failed to generate a random number.
* @retval CRYPT_BN_ERR_RAND_ZERO Generate a random number smaller than 0.
* @retval CRYPT_BN_ERR_RAND_NEGATE Generate a negative random number.
*/
int32_t BN_RandRange(BN_BigNum *r, const BN_BigNum *p);
/**
* @ingroup bn
* @brief generate random BigNum
*
* @param libCtx [IN] provider libCtx
* @param r [OUT] Generate a random number.
* @param p [IN] Compare data so that the generated r < p
*
* @retval CRYPT_SUCCESS A random number is successfully generated.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_RAND_GEN_FAIL Failed to generate a random number.
* @retval CRYPT_BN_ERR_RAND_ZERO Generate a random number smaller than 0.
* @retval CRYPT_BN_ERR_RAND_NEGATE Generate a negative random number.
*/
int32_t BN_RandRangeEx(void *libCtx, BN_BigNum *r, const BN_BigNum *p);
#endif
/**
* @ingroup bn
* @brief Binary to BigNum
*
* @param r [OUT] BigNum
* @param bin [IN] Data stream to be converted
* @param binLen [IN] Data stream length
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_Bin2Bn(BN_BigNum *r, const uint8_t *bin, uint32_t binLen);
/**
* @ingroup bn
* @brief Convert BigNum to a big-endian binary
*
* @param a [IN] BigNum
* @param bin [IN/OUT] Data stream to be converted -- The input pointer cannot be null.
* @param binLen [IN/OUT] Data stream length -- When input, binLen is also the length of the bin buffer.
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_SECUREC_FAIL An error occurred during the copy.
*/
int32_t BN_Bn2Bin(const BN_BigNum *a, uint8_t *bin, uint32_t *binLen);
/**
* @ingroup bn
* @brief fix size of BigNum
*
* @param a [IN] BigNum
*
* @retval void
*/
void BN_FixSize(BN_BigNum *a);
/**
* @ingroup bn
* @brief
*
* @param a [IN/OUT] BigNum
* @param words [IN] the bn room that the caller wanted.
*
* @retval CRYPT_SUCCESS
* @retval others, see crypt_errno.h
*/
int32_t BN_Extend(BN_BigNum *a, uint32_t words);
/**
* @ingroup bn
* @brief Convert BigNum to binary to obtain big-endian data with the length of binLen.
* The most significant bits are filled with 0.
*
* @param a [IN] BigNum
* @param bin [OUT] Data stream to be converted -- The input pointer cannot be null.
* @param binLen [IN] Data stream length -- When input, binLen is also the length of the bin buffer.
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_BN_BUFF_LEN_NOT_ENOUGH The space is insufficient.
*/
int32_t BN_Bn2BinFixZero(const BN_BigNum *a, uint8_t *bin, uint32_t binLen);
#ifdef HITLS_CRYPTO_BN_STR_CONV
/**
* @ingroup bn
* @brief Hexadecimal to a BigNum
*
 * @param r [OUT] BigNum
 * @param str [IN] Hexadecimal string to be converted
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_CONVERT_INPUT_INVALID Invalid string
*/
int32_t BN_Hex2Bn(BN_BigNum **r, const char *str);
/**
* @ingroup bn
* @brief Convert BigNum to hexadecimal number
*
 * @param a [IN] BigNum
 *
 * @retval Hexadecimal string after conversion, or NULL on failure
 *         (invalid null pointer or memory allocation failure).
char *BN_Bn2Hex(const BN_BigNum *a);
/**
* @ingroup bn
* @brief Decimal to BigNum
*
* @param r [OUT] BigNum
* @param str [IN] A decimal string to be converted
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_CONVERT_INPUT_INVALID Invalid string
*/
int32_t BN_Dec2Bn(BN_BigNum **r, const char *str);
/**
* @ingroup bn
* @brief Convert BigNum to decimal number
*
 * @param a [IN] BigNum
*
* @retval A decimal string after conversion or push error.
*/
char *BN_Bn2Dec(const BN_BigNum *a);
#endif
#if defined(HITLS_CRYPTO_CURVE_SM2_ASM) || \
((defined(HITLS_CRYPTO_CURVE_NISTP521) || defined(HITLS_CRYPTO_CURVE_NISTP384_ASM)) && \
defined(HITLS_CRYPTO_NIST_USE_ACCEL))
/**
* @ingroup bn
* @brief Converting a 64-bit unsigned number array to a BigNum
*
* @param r [OUT] BigNum
* @param array [IN] Array to be converted
* @param len [IN] Number of elements in the array
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_U64Array2Bn(BN_BigNum *r, const uint64_t *array, uint32_t len);
/**
* @ingroup bn
* @brief BigNum to 64-bit unsigned number array
*
* @param a [IN] BigNum
* @param array [IN/OUT] Array for storing results -- The input pointer cannot be null.
* @param len [IN/OUT] Length of the written array -- Number of writable elements when input
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_SECUREC_FAIL A copy error occurs.
*/
int32_t BN_Bn2U64Array(const BN_BigNum *a, uint64_t *array, uint32_t *len);
#endif
/**
* @ingroup bn
* @brief BigNum optimizer creation
*
* @param None
*
* @retval Not NULL Success
* @retval NULL failure
*/
BN_Optimizer *BN_OptimizerCreate(void);
/**
* @ingroup bn
* @brief Destroy the BigNum optimizer.
*
* @param opt [IN] BigNum optimizer
*
* @retval none
*/
void BN_OptimizerDestroy(BN_Optimizer *opt);
/**
* @ingroup bn
* @brief set library context
*
* @param libCtx [IN] Library context
* @param opt [OUT] BigNum optimizer
*
* @retval none
*/
void BN_OptimizerSetLibCtx(void *libCtx, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief get library context
*
* @param opt [In] BigNum optimizer
*
* @retval library context
*/
void *BN_OptimizerGetLibCtx(BN_Optimizer *opt);
/**
* @ingroup bn
* @brief BigNum Montgomery context creation and setting
*
* @param m [IN] Modulus m, which must be positive and odd
*
* @retval Not NULL Success
* @retval NULL failure
*/
BN_Mont *BN_MontCreate(const BN_BigNum *m);
/**
* @ingroup bn
* @brief BigNum Montgomery modular exponentiation.
* Whether to use the constant API depends on the property of the BigNum.
*
* @param r [OUT] Modular exponentiation result
* @param a [IN] base
* @param e [IN] Index
* @param mont [IN] Montgomery context
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS calculated successfully.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_MONT_BASE_TOO_MAX Montgomery modulus exponentiation base is too large
* @retval CRYPT_BN_OPTIMIZER_STACK_FULL The optimizer stack is full.
* @retval CRYPT_BN_ERR_EXP_NO_NEGATE exponent cannot be a negative number
*/
int32_t BN_MontExp(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e, BN_Mont *mont,
BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Constant time BigNum Montgomery modular exponentiation
*
* @param r [OUT] Modular exponentiation result
* @param a [IN] base
* @param e [IN] exponent
* @param mont [IN] Montgomery context
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS calculated successfully.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_BN_OPTIMIZER_GET_FAIL Failed to apply for space from the optimizer.
* @retval CRYPT_BN_MONT_BASE_TOO_MAX Montgomery Modular exponentiation base is too large
* @retval CRYPT_BN_OPTIMIZER_STACK_FULL The optimizer stack is full.
* @retval CRYPT_BN_ERR_EXP_NO_NEGATE exponent cannot be a negative number
*/
int32_t BN_MontExpConsttime(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e,
BN_Mont *mont, BN_Optimizer *opt);
/**
* @ingroup mont
* @brief BigNum Montgomery Context Destruction
*
* @param mont [IN] BigNum Montgomery context
*
* @retval none
*/
void BN_MontDestroy(BN_Mont *mont);
/**
* @ingroup bn
* @brief shift a BigNum to the right
*
* @param r [OUT] Shift result
* @param a [IN] Source data
* @param n [IN] Shift bit num
*
* @retval CRYPT_SUCCESS succeeded.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_SECUREC_FAIL The security function returns an error.
*/
int32_t BN_Rshift(BN_BigNum *r, const BN_BigNum *a, uint32_t n);
/**
* @ingroup bn
* @brief shift a BigNum to the left
*
* @param r [OUT] Shift result
* @param a [IN] Source data
* @param n [IN] Shift bit num
*
* @retval CRYPT_SUCCESS succeeded.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_Lshift(BN_BigNum *r, const BN_BigNum *a, uint32_t n);
#ifdef HITLS_CRYPTO_DSA
int32_t BN_MontExpMul(BN_BigNum *r, const BN_BigNum *a1, const BN_BigNum *e1,
const BN_BigNum *a2, const BN_BigNum *e2, BN_Mont *mont, BN_Optimizer *opt);
#endif
#ifdef HITLS_CRYPTO_ECC
/**
* @ingroup bn
* @brief Mould opening root
* @par Description: r^2 = a mod p; p-1=q*2^s.
* In the current implementation s=1 will take a special branch, and the calculation speed is faster.
* The fast calculation branch with s=2 is not implemented currently.
* Currently, the s corresponding to the mod p of the EC nist224, 256, 384, and 521 is 96, 1, 1, and 1 respectively
* The branch with s=2 is not used.
* The root number is provided for the EC.
* @param r [OUT] Modular root result
* @param a [IN] Source data, 0 <= a <= p-1
* @param p [IN] module, odd prime number
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS calculated successfully.
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_BN_ERR_SQRT_PARA The input parameter is incorrect.
* @retval CRYPT_BN_ERR_LEGENDE_DATA:
* Failed to find the specific number of the Legendre sign (z|p) of z to p equal to -1 when calculating the square root.
* @retval CRYPT_BN_ERR_NO_SQUARE_ROOT The square root cannot be found.
*/
int32_t BN_ModSqrt(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *p, BN_Optimizer *opt);
#endif
#if defined(HITLS_CRYPTO_CURVE_SM2_ASM) || (defined(HITLS_CRYPTO_CURVE_NISTP256_ASM) && \
defined(HITLS_CRYPTO_NIST_USE_ACCEL))
/**
* @ingroup bn
* @brief BigNum to BN_UINT array
*
* @param src [IN] BigNum
* @param dst [OUT] BN_UINT array for receiving the conversion result
* @param size [IN] Length of the dst buffer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
* @retval CRYPT_SECUREC_FAIL The security function returns an error.
*/
int32_t BN_BN2Array(const BN_BigNum *src, BN_UINT *dst, uint32_t size);
/**
* @ingroup bn
* @brief BN_UINT array to BigNum
*
* @param dst [OUT] BigNum
* @param src [IN] BN_UINT array to be converted
* @param size [IN] Length of the src buffer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer.
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_Array2BN(BN_BigNum *dst, const BN_UINT *src, const uint32_t size);
#endif
#ifdef HITLS_CRYPTO_ECC
/**
* @ingroup bn
* @brief Copy with the mask. When the mask is set to (0), r = a; when the mask is set to (-1), r = b.
*
* @attention Data r, a, and b must have the same room.
*
* @param r [OUT] Output result
* @param a [IN] Source data
* @param b [IN] Source data
* @param mask [IN] Mask data
*
* @retval CRYPT_SUCCESS succeeded.
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t BN_CopyWithMask(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_UINT mask);
/**
* @ingroup bn
* @brief Calculate r = (a - b) % mod
*
* @attention This API is invoked in the area where ECC point computing is intensive and is performance-sensitive.
* The user must ensure that a < mod, b < mod
* In addition, a->room and b->room are not less than mod->size.
* All data are non-negative
* The mod information cannot be 0.
* Otherwise, the interface may not be functional.
*
* @param r [OUT] Output result
* @param a [IN] Source data
* @param b [IN] Source data
* @param mod [IN] Modular data
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS succeeded.
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t BN_ModSubQuick(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
const BN_BigNum *mod, const BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Calculate r = (a + b) % mod
*
* @attention This API is invoked in the area where ECC point computing is intensive and is performance-sensitive.
* The user must ensure that a < mod, b < mod
* In addition, a->room and b->room are not less than mod->size.
* All data are non-negative
* The mod information cannot be 0.
* Otherwise, the interface may not be functional.
*
* @param r [OUT] Output result
* @param a [IN] Source data
* @param b [IN] Source data
* @param mod [IN] Modular data
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS succeeded.
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t BN_ModAddQuick(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
const BN_BigNum *mod, const BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Calculate r = (a * b) % mod
*
* @attention This API is invoked in the area where ECC point computing is intensive and is performance sensitive.
* The user must ensure that a < mod, b < mod
* In addition, a->room and b->room are not less than mod->size.
* All data are non-negative
* The mod information can only be the parameter p of the curve of nistP224, nistP256, nistP384, and nistP521.
* Otherwise, the interface may not be functional.
*
* @param r [OUT] Output result
* @param a [IN] Source data
* @param b [IN] Source data
* @param mod [IN] Modular data
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS succeeded.
* @retval For other errors, see crypt_errno.h.
*/
int32_t BN_ModNistEccMul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
void *mod, BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Calculate r = (a ^ 2) % mod
*
* @attention This API is invoked in the area where ECC point computing is intensive and is performance sensitive.
* The user must guarantee a < mod
* In addition, a->room are not less than mod->size.
* All data are non-negative
* The mod information can only be the parameter p of the curve of nistP224, nistP256, nistP384, and nistP521.
* Otherwise, the interface may not be functional.
*
* @param r [OUT] Output result
* @param a [IN] Source data
* @param mod [IN] Modular data
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS succeeded.
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t BN_ModNistEccSqr(BN_BigNum *r, const BN_BigNum *a, void *mod, BN_Optimizer *opt);
#endif
#ifdef HITLS_CRYPTO_CURVE_SM2
/**
* @ingroup ecc
* @brief sm2 curve: calculate r = (a*b)% mod
*
* @attention This API is invoked in the area where ECC point computing is intensive and is performance sensitive.
* The user must guarantee a < mod、b < mod
* In addition, a->room and b->room are not less than mod->size.
* All data are non-negative
* The mod information can only be the parameter p of the curve of sm2.
* Otherwise, the interface may not be functional.
*
* @param r [OUT] Output result
* @param a [IN] Source data
* @param b [IN] Source data
 * @param data [IN] Modular data (the sm2 curve parameter p)
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS succeeded.
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t BN_ModSm2EccMul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, void *data, BN_Optimizer *opt);
/**
* @ingroup ecc
* @brief sm2 curve: calculate r = (a ^ 2) % mod
*
* @attention This API is invoked in the area where ECC point computing is intensive and is performance sensitive.
* The user must guarantee a < mod
* In addition, a->room are not less than mod->size.
* All data are non-negative
* The mod information can only be the parameter p of the curve of sm2.
* Otherwise, the interface may not be functional.
*
* @param r [OUT] Output result
* @param a [IN] Source data
 * @param data [IN] Modular data (the sm2 curve parameter p)
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS succeeded.
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t BN_ModSm2EccSqr(BN_BigNum *r, const BN_BigNum *a, void *data, BN_Optimizer *opt);
#endif
#ifdef HITLS_CRYPTO_BN_PRIME_RFC3526
/**
* @ingroup bn
 * @brief Get the RFC 3526 MODP prime of the given modulus length.
*
* @param r [OUT] Output result
* @param len [IN] Length
*
* @retval Not NULL Success
* @retval NULL failure
*/
BN_BigNum *BN_GetRfc3526Prime(BN_BigNum *r, uint32_t len);
#endif
/**
* @ingroup bn
* @brief Return the number of security bits provided by a specific algorithm and specific key size.
*
 * @param pubLen [IN] Size of the public key
 * @param prvLen [IN] Size of the private key.
*
* @retval Number of security bits
*/
int32_t BN_SecBits(int32_t pubLen, int32_t prvLen);
#if defined(HITLS_CRYPTO_RSA)
/**
* @ingroup bn
* @brief Montgomery modulus calculation process, need a < m, b < m, All is positive numbers, The large number
optimizer must be enabled before this function is used.
*
* @param r [OUT] Output results
* @param a [IN] Input data
* @param b [IN] Input data
* @param mont [IN] Montgomery context
* @param opt [IN] Large number optimizer
*
* @retval CRYPT_SUCCESS
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t MontMulCore(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_Mont *mont, BN_Optimizer *opt);
#endif // HITLS_CRYPTO_RSA
#if defined(HITLS_CRYPTO_BN_PRIME)
/**
* @ingroup bn
* @brief Montgomery modulus calculation process, need a < m, unlimited symbols.
*
* @param r [OUT] Output results
* @param a [IN] Input data
* @param mont [IN] Montgomery context
* @param opt [IN] Large number optimizer
*
* @retval CRYPT_SUCCESS
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t MontSqrCore(BN_BigNum *r, const BN_BigNum *a, BN_Mont *mont, BN_Optimizer *opt);
#endif // HITLS_CRYPTO_BN_PRIME
/**
* @ingroup bn
* @brief Enabling the big data optimizer
*
* @param opt [IN] Large number optimizer
*
* @retval CRYPT_SUCCESS
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t OptimizerStart(BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Disabling the Large Number Optimizer
*
* @param opt [IN] Large number optimizer
*
* @retval CRYPT_SUCCESS
* @retval For details about other errors, see crypt_errno.h.
*/
void OptimizerEnd(BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Get Bn from the large number optimizer.
*
* @param opt [IN] Large number optimizer
* @param room [IN] Length of the big number.
*
* @retval BN_BigNum if success
* @retval NULL if failed
*/
BN_BigNum *OptimizerGetBn(BN_Optimizer *opt, uint32_t room);
#if defined(HITLS_CRYPTO_PAILLIER) || defined(HITLS_CRYPTO_RSA_CHECK)
/**
* @ingroup bn
* @brief BigNum Calculate the least common multiple
* @par Description: lcm(a, b) (a, b!=0)
*
* @param r [OUT] least common multiple
* @param a [IN] BigNum
* @param b [IN] BigNum
* @param opt [IN] Optimizer
*
* @retval CRYPT_SUCCESS
* @retval CRYPT_NULL_INPUT Invalid null pointer
* @retval CRYPT_MEM_ALLOC_FAIL Memory allocation failure
*/
int32_t BN_Lcm(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_Optimizer *opt);
#endif // HITLS_CRYPTO_PAILLIER || HITLS_CRYPTO_RSA_CHECK
/**
* @ingroup bn
* @brief Enabling the big data optimizer
*
* @param opt [IN] Large number optimizer
*
* @retval CRYPT_SUCCESS
* @retval For details about other errors, see crypt_errno.h.
*/
int32_t OptimizerStart(BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Disabling the Large Number Optimizer
*
* @param opt [IN] Large number optimizer
*
* @retval CRYPT_SUCCESS
* @retval For details about other errors, see crypt_errno.h.
*/
void OptimizerEnd(BN_Optimizer *opt);
/**
* @ingroup bn
* @brief Get Bn from the large number optimizer.
*
* @param opt [IN] Large number optimizer
* @param room [IN] Length of the big number.
*
* @retval BN_BigNum if success
* @retval NULL if failed
*/
BN_BigNum *OptimizerGetBn(BN_Optimizer *opt, uint32_t room);
#ifdef HITLS_CRYPTO_CURVE_MONT
/**
* a, b is mont form.
* r = a * b
*/
int32_t BN_EcPrimeMontMul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, void *data, BN_Optimizer *opt);
/**
* a is mont form.
* r = a ^ 2
*/
int32_t BN_EcPrimeMontSqr(BN_BigNum *r, const BN_BigNum *a, void *mont, BN_Optimizer *opt);
/**
* r = Reduce(r * RR)
*/
int32_t BnMontEnc(BN_BigNum *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime);
/**
* r = Reduce(r)
*/
void BnMontDec(BN_BigNum *r, BN_Mont *mont);
/**
* This interface is a constant time.
* if mask = BN_MASK. swap a and b.
* if mask = 0, a and b remain as they are.
*/
int32_t BN_SwapWithMask(BN_BigNum *a, BN_BigNum *b, BN_UINT mask);
#endif // HITLS_CRYPTO_CURVE_MONT
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/include/crypt_bn.h | C | unknown | 48,680 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include "bn_bincal.h"
#ifndef HITLS_SIXTY_FOUR_BITS
#error Bn binical x8664 optimizer must open BN-64.
#endif
// r = a + b, len = n, return carry
BN_UINT BinAdd(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n)
{
if (n == 0) {
return 0;
}
BN_UINT ret = 0;
BN_UINT times = n >> 2;
BN_UINT rem = n & 3;
asm volatile(
".align 3 \n"
" mov %0, #1 \n"
" adcs %0, xzr, %0 \n" // clear C flags
" mov %0, #0 \n"
" cbz %1, 3f \n"
"4: add x4, %3, %0 \n"
" add x5, %4, %0 \n"
" add x6, %5, %0 \n"
" ldp x7, x8, [x5] \n"
" ldp x9, x10, [x5,#16] \n"
" ldp x11, x12, [x6] \n"
" ldp x13, x14, [x6,#16] \n"
" adcs x7, x7, x11 \n"
" adcs x8, x8, x12 \n"
" adcs x9, x9, x13 \n"
" adcs x10, x10, x14 \n"
" stp x7, x8, [x4] \n"
" stp x9, x10, [x4, #16] \n"
" sub %1, %1, #0x1 \n"
" add %0, %0, #0x20 \n"
" cbnz %1, 4b \n"
"3: cbz %2, 2f \n" // times <= 0, jump to single cycle
"1: ldr x7, [%4, %0] \n"
" ldr x8, [%5, %0] \n"
" adcs x7, x7, x8 \n"
" str x7, [%3, %0] \n"
" sub %2, %2, #0x1 \n"
" add %0, %0, #0x8 \n"
" cbnz %2, 1b \n"
"2: mov %0, #0 \n"
" adcs %0, xzr, %0 \n"
:"+&r" (ret), "+r"(times), "+r"(rem)
:"r"(r), "r"(a), "r"(b)
:"x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "cc", "memory");
return ret & 1;
}
// r = a - b, len = n, return carry
BN_UINT BinSub(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n)
{
if (n == 0) {
return 0;
}
BN_UINT ret = 0;
BN_UINT rem = n & 3;
BN_UINT times = n >> 2;
asm volatile(
".align 3 \n"
" mov %0, #1 \n"
" sbcs %0, %0, xzr \n" // clear C flags
" mov %0, #0 \n"
" cbz %1, 2f \n"
"4: add x4, %3, %0 \n"
" add x5, %4, %0 \n"
" add x6, %5, %0 \n"
" ldp x7, x8, [x5] \n"
" ldp x9, x10, [x5,#16] \n"
" ldp x11, x12, [x6] \n"
" ldp x13, x14, [x6,#16] \n"
" sbcs x7, x7, x11 \n"
" sbcs x8, x8, x12 \n"
" sbcs x9, x9, x13 \n"
" sbcs x10, x10, x14 \n"
" stp x7, x8, [x4] \n"
" stp x9, x10, [x4, #16] \n"
" sub %1, %1, #0x1 \n"
" add %0, %0, #0x20 \n"
" cbnz %1, 4b \n"
"2: cbz %2, 3f \n" // times <= 0, jump to single cycle
"1: ldr x7, [%4, %0] \n"
" ldr x8, [%5, %0] \n"
" sbcs x7, x7, x8 \n"
" str x7, [%3, %0] \n"
" sub %2, %2, #0x1 \n"
" add %0, %0, #0x8 \n"
" cbnz %2, 1b \n"
"3: mov %0,#0 \n"
" sbcs %0,xzr,%0 \n"
:"+&r" (ret), "+r"(times), "+r"(rem)
:"r"(r), "r"(a), "r"(b)
:"x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "cc", "memory");
return ret & 1;
}
// r = r - a * m, return the carry;
BN_UINT BinSubMul(BN_UINT *r, const BN_UINT *a, BN_UINT aSize, BN_UINT m)
{
BN_UINT borrow = 0;
BN_UINT i = 0;
asm volatile(
".align 3 \n"
"2: ldr x4, [%3, %1] \n" // x4 = r[i]
" ldr x5, [%4, %1] \n" // x5 = r[i]
" mul x7, x5, %5 \n" // x7 = al
" umulh x6, x5, %5 \n" // x6 = ah
" adds x7, %0, x7 \n" // x7 = borrow + al
" adcs %0, x6, xzr \n" // borrow = ah + H(borrow + al)
" cmp x7, x4 \n" // if r[i] > borrow + al, dont needs carry
" beq 1f \n"
" adc %0, %0, xzr \n"
"1: sub x4, x4, x7 \n"
" str x4, [%3, %1] \n"
" sub %2, %2, #0x1 \n"
" add %1, %1, #0x8 \n"
" cbnz %2, 2b \n"
:"+&r" (borrow), "+r"(i), "+r"(aSize)
:"r"(r), "r"(a), "r"(m)
:"x4", "x5", "x6", "x7", "cc", "memory");
return borrow;
}
/* Obtains the number of 0s in the first x most significant bits of data. */
uint32_t GetZeroBitsUint(BN_UINT x)
{
BN_UINT count;
asm ("clz %0, %1" : "=r" (count) : "r" (x));
return (uint32_t)count;
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/armv8_bn_bincal.c | C | unknown | 7,049 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
/*
* Description: Big number Montgomery modular multiplication in armv8 implementation, MontMul_Asm
* Ref: Montgomery Multiplication
* Process: To cal A * B mod n, we can convert to mont form, and cal A*B*R^(-1).
* Detail:
* intput:A = (An-1,...,A1,A0)b, B = (Bn-1,...,B1,B0)b, n, n'
* output:A*B*R^(-1)
* tmp = (tn,tn-1,...,t1,t0)b, initialize to 0
* for i: 0 -> (n-1)
* ui = (t0 + Ai*B0)m' mod b
* t = (t + Ai*B + ui * m) / b
* if t >= m
* t -= m
* return t;
*
* Deal process:
* i. size % 8 == 0 & a == b --> Sqr8x --> complete multiplication
* --> size == 8, goto single reduce step
* --> size >= 8, goto loop reduce process
* ii. size % 4 == 0 --> Mul4x
* --> size == 4, goto single step
* --> size >= 4, goto loop process
* iii. Ordinary --> Mul1x
* --> size == 2, goto single step
* --> size >= 2, goto loop process
*
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "crypt_arm.h"
.arch armv8-a+crypto
.file "bn_mont_armv8.S"
.text
.global MontMul_Asm
.type MontMul_Asm, %function
.align 5
MontMul_Asm:
AARCH64_PACIASP
tst x5, #7
b.eq MontSqr8
tst x5, #3
b.eq MontMul4
stp x29, x30, [sp, #-64]!
mov x29, sp
stp x23, x24, [sp, #16]
stp x21, x22, [sp, #32]
stp x19, x20, [sp, #48]
sub x21, x5 , #2 // j = size-2
cbnz x21,.LMul1xBegin
// if size == 2, goto single step
ldp x15, x16, [x1] // a[0], a[1]
ldp x19, x20, [x2] // b[0], b[1]
ldp x9, x10, [x3] // n[0], n[1]
mul x23 , x15 , x19 // x23 = lo(a[0] * b[0])
umulh x24 , x15 , x19 // x24 = hi(a[0] * b[0])
mul x6, x16 , x19 // x6 = lo(a[1] * b[0])
umulh x7, x16 , x19 // x7 = hi(a[1] * b[0])
mul x11, x23 , x4 // x11 = lo(t[0] * k0)
umulh x19, x9, x11 // x19 = hi(n[0] * t[0]*k0)
mul x12, x10, x11 // x12 = lo(n[1] * t[0]*k0)
umulh x13, x10, x11 // x13 = hi(n[1] * t[0]*k0)
// we knowns a*b + n'n = 0 (mod R)
// so lo(a[0] * b[0]) + lo(n[0] * t[0]*k0) = 0 (mod R)
// if lo(a[0] * b[0]) > 0, then 'lo(a[0] * b[0]) + lo(n[0] * t[0]*k0)' would overflow,
// else if lo(a[0] * b[0]) == 0, then lo(n[0] * t[0]*k0) == 0
cmp x23, #1
adc x19, x19, xzr
adds x23 , x6, x24 // x23 = lo(a[1] * b[0]) + hi(a[0] * b[0])
adc x24 , x7, xzr // x24 = hi(a[1] * b[0]) + CF
adds x8, x12, x19 // x8 = lo(n[1] * t[0]*k0) + hi(n[0] * t[0]*k0)
adc x19, x13, xzr // x19 = hi(n[1] * t[0]*k0) + CF
adds x21, x8, x23 // x21 = lo(n[1] * t[0]*k0) + hi(n[0] * t[0]*k0)
adcs x22, x19, x24
adc x23, xzr, xzr // x23 = CF
mul x14 , x15 , x20 // a[0] * b[1]
umulh x15 , x15 , x20
mul x6, x16 , x20 // a[1] * b[1]
umulh x7, x16 , x20
adds x14 , x14 , x21
adc x15 , x15 , xzr
mul x11, x14 , x4 // t[0] * k0
umulh x20, x9, x11
mul x12, x10, x11 // n[1] * t[0]*k0
umulh x13, x10, x11
cmp x14 , #1 // Check whether the low location is carried.
adc x20, x20, xzr
adds x14 , x6, x15
adc x15 , x7, xzr
adds x21, x12, x20
adcs x20, x13, x23
adc x23, xzr, xzr
adds x14 , x14 , x22
adc x15 , x15 , xzr
adds x21, x21, x14
adcs x20, x20, x15
adc x23, x23, xzr // x23 += CF
subs x12 , x21, x9
sbcs x13, x20, x10
sbcs x23, x23, xzr // update CF
csel x10, x21, x12, lo
csel x11, x20, x13, lo
stp x10, x11, [x0]
b .LMul1xEnd
.LMul1xBegin:
mov x24, x5 // the outermost pointers of our loop
lsl x5 , x5 , #3
sub x22, sp , x5 // The space size needs to be applied for.
and x22, x22, #-16 // For 4-byte alignment
mov sp , x22 // Apply for Space.
mov x6, x5
mov x23, xzr
.LMul1xInitstack:
sub x6, x6, #8
str xzr, [x22], #8
cbnz x6, .LMul1xInitstack
mov x22, sp
.LMul1xLoopProces:
sub x24, x24, #1
sub x21, x5 , #16 // j = size-2
// Begin mulx
ldr x17, [x2], #8 // b[i]
ldp x15, x16, [x1], #16 // a[0], a[1]
ldp x9, x10, [x3], #16 // n[0], n[1]
ldr x19, [x22] // The sp val is 0 during initialization.
mul x14 , x15 , x17 // a[0] * b[i]
umulh x15 , x15 , x17
mul x6, x16 , x17 // a[1] * b[i]
umulh x7, x16 , x17
adds x14 , x14 , x19
adc x15 , x15 , xzr
mul x11, x14 , x4
umulh x9, x9, x11
mul x12, x10, x11 // n[1] * t[0]*k0
cmp x14, #1
umulh x13, x10, x11
.LMul1xPrepare:
sub x21, x21, #8 // index -= 1
ldr x16, [x1], #8 // a[i]
ldr x10, [x3], #8 // n[i]
ldr x19, [x22, #8] // t[j]
adc x9, x9, xzr
adds x14 , x6, x15
adc x15 , x7, xzr
adds x8, x12, x9
adc x9, x13, xzr
mul x6, x16, x17 // a[j] * b[i]
adds x14, x14 , x19
umulh x7, x16 , x17
adc x15, x15 , xzr
mul x12, x10, x11 // n[j] * t[0]*k0
adds x8, x8, x14
umulh x13, x10, x11
str x8, [x22], #8 // t[j-1]
cbnz x21, .LMul1xPrepare
.LMul1xReduce:
ldr x19, [x22, #8]
adc x9, x9, xzr
adds x14 , x6, x15
adc x15 , x7, xzr
adds x8, x12, x9
adcs x9, x13, x23
adc x23, xzr, xzr
adds x14 , x14 , x19
adc x15 , x15 , xzr
adds x8, x8, x14
adcs x9, x9, x15
adc x23, x23, xzr // x23 += CF, carry of the most significant bit.
stp x8, x9, [x22], #8
mov x22, sp
sub x1 , x1 , x5
subs x3 , x3 , x5 // x3 = &n[0]
cbnz x24, .LMul1xLoopProces
mov x1, x0
mov x21, x5 // get index
.LMul1xSubMod:
ldr x19, [x22], #8
ldr x10, [x3], #8
sub x21, x21, #8 // j--
sbcs x16, x19, x10 // t[j] - n[j]
str x16, [x1], #8 // r[j] = t[j] - n[j]
cbnz x21,.LMul1xSubMod
sbcs x23, x23, xzr // x23 -= CF
mov x22, sp
.LMul1xCopy:
ldr x19, [x22], #8
ldr x16, [x0]
sub x5, x5, #8 // size--
csel x10, x19, x16, lo
str x10, [x0], #8
cbnz x5 , .LMul1xCopy
.LMul1xEnd:
ldp x23, x24, [x29, #16]
mov sp , x29
ldp x21, x22, [x29, #32]
ldp x19, x20, [x29, #48]
ldr x29, [sp], #64
AARCH64_AUTIASP
ret
.size MontMul_Asm, .-MontMul_Asm
.type MontSqr8, %function
MontSqr8:
AARCH64_PACIASP
cmp x1, x2
b.ne MontMul4
stp x29, x30, [sp, #-128]! // sp = sp - 128(Modify the SP and then save the SP.), [sp] = x29, [sp + 8] = x30,
// !Indicates modification sp
mov x29, sp // x29 = sp, The sp here has been reduced by 128.
stp x27, x28, [sp, #16]
stp x25, x26, [sp, #32]
stp x23, x24, [sp, #48]
stp x21, x22, [sp, #64]
stp x19, x20, [sp, #80]
stp x0 , x3 , [sp, #96] // offload r and n, Push the pointers of r and n into the stack.
str x4 , [sp, #112] // store n0
lsl x5, x5, #3 // x5 = x5 * 8, Converts size to bytes.
sub x2, sp, x5, lsl#1 // x2 = sp - 2*x5*8, x5 = size, x2 points to the start address of a 2*size memory. *8 is to convert to bytes
mov sp, x2 // Alloca, Apply for Space.
mov x19, x5 // The lowest eight data blocks do not need to be cleared.
eor v0.16b,v0.16b,v0.16b
eor v1.16b,v1.16b,v1.16b
.LSqr8xStackInit:
sub x19, x19, #8*8 // Offset 64, cyclic increment.
st1 {v0.2d, v1.2d}, [x2], #32
st1 {v0.2d, v1.2d}, [x2], #32
st1 {v0.2d, v1.2d}, [x2], #32
st1 {v0.2d, v1.2d}, [x2], #32
cbnz x19, .LSqr8xStackInit // When x19 = 0, the loop exits.
mov x2 , sp // After clear to zero, assign sp back to x2.
ldp x27, x28, [x2]
ldp x25, x26, [x2]
ldp x23, x24, [x2]
ldp x21, x22, [x2]
add x3 , x1 , x5 // x3 = x1 + bytes(size * 8)
ldp x14 , x15 , [x1], #16 // x14 = a[0], x15 = a[1]
ldp x16 , x17 , [x1], #16 // x16 = a[2], x17 = a[3]
ldp x6, x7, [x1], #16 // x6 = a[4], x7 = a[5]
ldp x8, x9, [x1], #16 // x8 = a[6], x9 = a[7]
.LSqr8xLoopMul:
mul x10, x14, x15 // a[0] * a[1~4]
mul x11, x14, x16 // keep cache hit ratio of x6
mul x12, x14, x17
mul x13, x14, x6
adds x28, x28, x10 // x27~x22 = t[0~7], x28 = t[1] = lo(a[0]*a[1]), adds is used to set CF to 0.
adcs x25, x25, x11 // x10~x17 Used to save subsequent calculation results
mul x10, x14 , x7 // lo(a[0] * a[5~7]), keep cache hit ratio of x14, the same below
mul x11, x14 , x8
adcs x26, x26, x12
adcs x23, x23, x13 // t[4] = lo(a[0] * a[4])
adcs x24, x24, x10 // x24~x22 = t[5~7]
mul x12, x14 , x9 // lo(a[0] * a[7])
stp x27, x28, [x2], #8*2 // t[0] = a[0]^2, Because the square term is not calculated temporarily,
// so t[0] = 0, t[1] = a[0] * a[1] + carry
adcs x21, x21, x11
adcs x22, x22, x12 // t[7] += lo(a[0] * a[7]), Carrying has to be given t[8]
adc x27, xzr, xzr // x27 = CF ( Set by t[7] += lo(a[0] * a[7]) ),
umulh x13, x14 , x15 // hi(a[0] * a[1~4]), Use x17 to keep the cache hit
umulh x10, x14 , x16
umulh x11, x14 , x17
umulh x12, x14 , x6
// In the new round, the first calculation does not need to be carried, but the CF bit needs to be modified.
adds x25, x25, x13 // t[2] += hi(a[0] * a[1])
adcs x26, x26, x10
adcs x23, x23, x11
adcs x24, x24, x12 // t[5] += hi(a[0] * a[4])
umulh x13, x14 , x7 // hi(a[0] * a[5~7])
umulh x10, x14 , x8
umulh x11, x14 , x9
//----- lo(a[1] * a[2~4]) ------
adcs x21, x21, x13 // t[6] += hi(a[0] * a[5])
adcs x22, x22, x10 // t[7] += hi(a[0] * a[6])
adc x27, x27, x11 // t[8] += hi(a[0] * a[7])
mul x12, x15, x16 // lo(a[1] * a[2])
mul x13, x15, x17
mul x10, x15, x6
//----- lo(a[1] * a[5~7]) ------
adds x26, x26, x12 // t[3] += lo(a[1] * a[2]), The first calculation of this round
// does not take into account the previous carry, and the CF is not modified in line 118.
adcs x23, x23, x13 // t[4] += lo(a[1] * a[3])
adcs x24, x24, x10 // t[5] += lo(a[1] * a[4])
mul x11, x15 , x7
mul x12, x15 , x8
mul x13, x15 , x9
//----- hi(a[1] * a[2~5]) ------
adcs x21, x21, x11 // t[6] += lo(a[1] * a[5])
adcs x22, x22, x12 // t[7] += lo(a[1] * a[6])
adcs x27, x27, x13 // t[8] += lo(a[1] * a[7])
umulh x10, x15, x16 // hi(a[1] * a[2])
umulh x11, x15, x17
umulh x12, x15, x6
umulh x13, x15, x7
stp x25, x26, [x2], #8*2 // t[2] and t[3] are calculated and stored in the memory.
// x25 and x22 are used to store t[10] and t[11].
adc x28, xzr, xzr // t[9] = CF ( Set by t[8] += lo(a[1] * a[7]) )
//In the new round, the first calculation does not need to be carried, but the CF bit needs to be modified.
//----- hi(a[1] * a[6~7]) ------
adds x23, x23, x10 // t[4] += hi(a[1] * a[2])
adcs x24, x24, x11 // t[5] += hi(a[1] * a[3])
adcs x21, x21, x12 // t[6] += hi(a[1] * a[4])
umulh x10, x15 , x8 // hi(a[1] * a[6])
umulh x11, x15 , x9 // hi(a[1] * a[7])
//----- lo(a[2] * a[3~7]) ------
adcs x22, x22, x13 // t[7] += hi(a[1] * a[5])
adcs x27, x27, x10 // t[8] += hi(a[1] * a[6])
adc x28, x28, x11 // t[9] += hi(a[1] * a[7]), Here, only the carry of the previous round
mul x12, x16, x17 // lo(a[2] * a[3])
mul x13, x16, x6
mul x10, x16, x7
// of calculation is retained before x20 calculation. Add x15 to the carry.
mul x11, x16 , x8
adds x24, x24, x12 // t[5] += lo(a[2] * a[3]), For the first calculation of this round,
// the previous carry is not considered.
mul x12, x16 , x9
adcs x21, x21, x13 // t[6] += lo(a[2] * a[4])
//----- hi(a[2] * a[3~7]) ------
adcs x22, x22, x10 // t[7] += lo(a[2] * a[5])
umulh x13, x16, x17 // hi(a[2] * a[3])
umulh x10, x16, x6
adcs x27, x27, x11 // t[8] += lo(a[2] * a[6])
adcs x28, x28, x12 // t[9] += lo(a[2] * a[7])
umulh x11, x16, x7
umulh x12, x16, x8
stp x23, x24, [x2], #8*2 // After t[4] and t[5] are calculated, they are stored in the memory.
// x23 and x24 are used to store t[12] and t[13].
adc x25, xzr, xzr // t[10] = CF ( Set by t[9] += lo(a[2] * a[7]) )
// In the new round, the first calculation does not need to be carried, but the CF bit needs to be modified.
adds x21, x21, x13 // t[6] += hi(a[2] * a[3])
adcs x22, x22, x10 // t[7] += hi(a[2] * a[4])
umulh x13, x16, x9
//----- lo(a[3] * a[4~7]) ------
adcs x27, x27, x11 // t[8] += hi(a[2] * a[5])
adcs x28, x28, x12 // t[9] += hi(a[2] * a[6])
adc x25, x25, x13 // t[10] += hi(a[2] * a[7])
mul x10, x17, x6
mul x11, x17, x7
mul x12, x17, x8
mul x13, x17, x9
//----- hi(a[3] * a[4~7]) ------
adds x22, x22, x10 // t[7] += lo(a[3] * a[4])
adcs x27, x27, x11 // t[8] += lo(a[3] * a[5])
adcs x28, x28, x12 // t[9] += lo(a[3] * a[6])
adcs x25, x25, x13 // t[10] += lo(a[3] * a[7])
umulh x10, x17, x6
umulh x11, x17, x7
umulh x12, x17, x8
umulh x13, x17, x9
stp x21, x22, [x2], #8*2 // t[6] and t[7] are calculated and stored in the memory.
// x21 and x26 are used to store t[14] and t[15].
adc x26, xzr, xzr // t[11] = CF ( Set by t[10] += lo(a[3] * a[7]) )
// In the new round, the first calculation does not need to be carried, but the CF bit needs to be modified.
adds x27, x27, x10 // t[8] += hi(a[3] * a[4])
//----- lo(a[4] * a[5~7]) ------
adcs x28, x28, x11 // t[9] += hi(a[3] * a[5])
adcs x25, x25, x12 // t[10] += hi(a[3] * a[6])
adc x26, x26, x13 // t[11] += hi(a[3] * a[7])
mul x10, x6, x7
mul x11, x6, x8
mul x12, x6, x9
//----- hi(a[4] * a[5~7]) ------
adds x28, x28, x10 // t[9] += lo(a[4] * a[5])
adcs x25, x25, x11 // t[10] += lo(a[4] * a[6])
adcs x26, x26, x12 // t[11] += lo(a[4] * a[7])
umulh x13, x6, x7
umulh x10, x6, x8
umulh x11, x6, x9
//----- lo(a[5] * a[6~7]) ------
mul x12, x7, x8
// This is actually a new round, but only t[0-7] can be calculated in each cycle,
// and t[8-15] retains the intermediate calculation result.
adc x23, xzr, xzr // t[12] = CF( Set by t[11] += lo(a[4] * a[7]) )
adds x25, x25, x13 // t[10] += hi(a[4] * a[5])
adcs x26, x26, x10 // t[11] += hi(a[4] * a[6])
mul x13, x7, x9
//----- hi(a[5] * a[6~7]) ------
adc x23, x23, x11 // t[12] += hi(a[4] * a[7])
umulh x10, x7, x8
umulh x11, x7, x9
adds x26, x26, x12 // t[11] += lo(a[5] * a[6])
//----- lo(a[6] * a[7]) ------
adcs x23, x23, x13 // t[12] += lo(a[5] * a[7])
mul x12, x8, x9
//----- hi(a[6] * a[7]) ------
adc x24, xzr, xzr // t[13] = CF ( Set by t[12] += lo(a[5] * a[7]) ),
umulh x13, x8, x9
// This operation is required when a new umulh is added.
adds x23, x23, x10 // t[12] += hi(a[5] * a[6])
adc x24, x24, x11 // t[13] += hi(a[5] * a[7])
sub x19, x3, x1 // x3 = &a[size], x1 = &a[8], x19 = (size - 8) * 8
adds x24, x24, x12 // t[13] += lo(a[6] * a[7])
adc x21, xzr, xzr // t[14] = CF ( set by t[13] += lo(a[6] * a[7]) )
add x21, x21, x13 // t[14] += hi(a[6] * a[7]), There must be no carry in the last step.
cbz x19, .LSqr8xLoopMulEnd
mov x0, x1 // x0 = &a[8]
mov x22, xzr
//########################################
//# a[0~7] * a[8~15] #
//########################################
.LSqr8xHighMulBegian:
mov x19, #-8*8 // Loop range. x0 can retrieve a[0–7] based on this offset.
ldp x14 , x15 , [x2, #8*0] // x14 = t[8] , x15 = t[9]
adds x27, x27, x14 // t[8](t[8] reserved in the previous round of calculation) + = t[8]
// (t[8] taken from memory, initially 0)
adcs x28, x28, x15 // t[9] += t[9], be the same as the above
ldp x16 , x17 , [x2, #8*2] // x16 = t[10], x17 = t[11]
ldp x14 , x15 , [x1], #16 // x14 = a[8], x15 = a[9]
adcs x25, x25, x16
adcs x26, x26, x17
ldp x6, x7, [x2, #8*4] // x6 = t[12], x7 = t[13]
ldp x16 , x17 , [x1], #16 // x16 = a[10], x17 = a[11]
adcs x23, x23, x6
adcs x24, x24, x7
ldp x8, x9, [x2, #8*6] // x8 = t[14], x9 = t[15]
ldp x6, x7, [x1], #16 // x6 = a[12], x7 = a[13]
adcs x21, x21, x8
adcs x22, x22, x9 // t[15] = t[15] + CF, Because a[7]*a[7] is not calculated previously, t[15]=0
ldp x8, x9, [x1], #16 // x8 = a[14], x9 = a[15]
.LSqr8xHighMulProces:
ldr x4 , [x0, x19] // x4 = [x0 + x19] = [x0 - 56] = [&a[8] - 56] = a[8 - 7] = a[1]
//-----lo(a[0] * a[8~11])-----
adc x20, xzr, xzr // x20 += CF, Save the carry of t[15]. The same operation is performed below.
add x19, x19, #8 // x19 += 8, Loop step size
mul x10, x4 , x14 // x4 = a[0], x14 = a[8], x10 = lo(a[0] * a[8])
mul x11, x4 , x15 // x11 = lo(a[0] * a[9])
mul x12, x4 , x16 // x12 = lo(a[0] * a[10])
mul x13, x4 , x17 // x13 = lo(a[0] * a[11])
//-----lo(a[0] * a[12~15])-----
adds x27, x27, x10 // CF does not need to be added for the first calculation,
// t[8] += lo(a[0] * a[8])
adcs x28, x28, x11 // t[9] += lo(a[0] * a[9])
adcs x25, x25, x12 // t[10] += lo(a[0] * a[10])
adcs x26, x26, x13 // t[11] += lo(a[0] * a[11])
mul x10, x4 , x6
mul x11, x4 , x7
mul x12, x4 , x8
mul x13, x4 , x9
//-----hi(a[0] * a[8~11])-----
adcs x23, x23, x10 // t[12] += lo(a[0] * a[12])
adcs x24, x24, x11 // t[13] += lo(a[0] * a[13])
adcs x21, x21, x12 // t[14] += lo(a[0] * a[14])
adcs x22, x22, x13 // t[15] += lo(a[0] * a[15])
umulh x10, x4 , x14
umulh x11, x4 , x15
umulh x12, x4 , x16
umulh x13, x4 , x17
adc x20, x20, xzr // x20 += CF, Save the carry of t[15]
str x27, [x2], #8 // [x2] = t[8], x2 += 8, x27~x22 = t[9~16],
// Update the mapping relationship to facilitate cycling.
// x27~x26 always correspond to t[m~m+7], and x19 is always the LSB of the window
//-----hi(a[0] * a[12~15])-----
adds x27, x28, x10 // t[9] += hi(a[0] * a[8]), The last calculation was to calculate t[15],
// so carry cannot be added to t[9], so adds is used
adcs x28, x25, x11 // t[10] += hi(a[0] * a[9])
adcs x25, x26, x12 // t[11] += hi(a[0] * a[10])
adcs x26, x23, x13 // t[12] += hi(a[0] * a[11])
umulh x10, x4 , x6
umulh x11, x4 , x7
umulh x12, x4 , x8
umulh x13, x4 , x9 // x13 = hi(a[0] * a[15])
adcs x23, x24, x10 // t[13] += hi(a[0] * a[12])
adcs x24, x21, x11 // t[14] += hi(a[0] * a[13])
adcs x21, x22, x12 // t[15] += hi(a[0] * a[14])
adcs x22, x20, x13 // t[16] = hi(a[0] * a[15]) + CF
cbnz x19, .LSqr8xHighMulProces // When exiting the loop, x0 = &a[8], x2 = &t[16]
sub x16, x1, x3 // x3 = x1 + x5 * 8(Converted to bytes), When x1 = x3, the loop ends.
cbnz x16, .LSqr8xHighMulBegian // x0 is the outer loop, x1 is the inner loop, and the inner loop ends.
// In this case, x2 = &a[size], out-of-bounds position.
mov x1, x0 // Outer Loop Increment, x1 = &a[16]
ldp x14 , x15 , [x1], #16 // x14 = a[8] , x15 = a[9]
ldp x16 , x17 , [x1], #16 // x16 = a[10], x17 = a[11]
ldp x6, x7, [x1], #16 // x6 = a[12], x7 = a[13]
ldp x8, x9, [x1], #16 // x8 = a[14], x9 = a[15]
sub x10, x3 , x1 // Check whether the outer loop ends, x3 = &a[size], x10 = (size - 16)*8
cbz x10, .LSqr8xLoopMul
sub x11, x2 , x10 // x2 = &t[24], x11 = &t[16]
stp x27, x28, [x2 , #8*0] // t[24] = x27, t[25] = x28
ldp x27, x28, [x11, #8*0] // x27 = t[16], x28 = t[17]
stp x25, x26, [x2 , #8*2] // t[26] = x25, t[27] = x26
ldp x25, x26, [x11, #8*2] // x25 = t[18], x26 = t[19]
stp x23, x24, [x2 , #8*4] // t[28] = x23, t[29] = x24
ldp x23, x24, [x11, #8*4] // x23 = t[20], x24 = t[21]
stp x21, x22, [x2 , #8*6] // t[30] = x21, t[31] = x22
ldp x21, x22, [x11, #8*6] // x21 = t[22], x22 = t[23]
mov x2 , x11 // x2 = &t[16]
b .LSqr8xLoopMul
.align 4
.LSqr8xLoopMulEnd:
//===== Calculate the squared term =====
//----- sp = &t[0] , x2 = &t[24]-----
sub x10, x3, x5 // x10 = a[0]
stp x27, x28, [x2, #8*0] // t[24] = x27, t[25] = x28
stp x25, x26, [x2, #8*2] // When this step is performed, the calculation results reserved for x27–x26
// are not pushed to the stack.
stp x23, x24, [x2, #8*4]
stp x21, x22, [x2, #8*6]
ldp x11, x12, [sp, #8*1] // x11 = t[1], x12 = t[2]
ldp x15, x17, [x10], #16 // x15 = a[0], x17 = a[1]
ldp x7, x9, [x10], #16 // x7 = a[2], x9 = a[3]
mov x1, x10
ldp x13, x10, [sp, #8*3] // x13 = t[3], x10 = t[4]
mul x27, x15, x15 // x27 = lo(a[0] * a[0])
umulh x15, x15, x15 // x15 = hi(a[0] * a[0])
mov x2 , sp // x2 = sp = &t[0]
mul x16, x17, x17 // x16 = lo(a[1] * a[1])
adds x28, x15, x11, lsl#1 // x28 = x15 + (x11 * 2) = hi(a[0] * a[0]) + 2 * t[1]
umulh x17, x17, x17 // x17 = hi(a[1] * a[1])
extr x11, x12, x11, #63 // Lower 63 bits of x11 = x16 | most significant bit of x15
// Cyclic right shift by 63 bits to obtain the lower bit,
// which is equivalent to cyclic left shift by 1 bit to obtain the upper bit.
// The purpose is to *2.
// x11 = 2*t[2](Ignore the overflowed part) + carry of (2*t[1])
mov x19, x5 // x19 = size*8
.LSqr8xDealSquare:
adcs x25, x16 , x11 // x25 = lo(a[1] * a[1]) + 2*t[2]
extr x12, x13, x12, #63 // x12 = 2*t[3](Ignore the overflowed part) + carry of (2*t[2])
adcs x26, x17 , x12 // x26 = hi(a[1] * a[1]) + 2*t[3]
sub x19, x19, #8*4 // x19 = (size - 8)*8
stp x27, x28, [x2], #16 // t[0~3]Re-push stack
stp x25, x26, [x2], #16
mul x6, x7, x7 // x6 = lo(a[2] * a[2])
umulh x7, x7, x7 // x7 = hi(a[2] * a[2])
mul x8, x9, x9 // x6 = lo(a[3] * a[3])
umulh x9, x9, x9 // x7 = hi(a[3] * a[3])
ldp x11, x12, [x2, #8] // x11 = t[5], x12 = t[6]
extr x13, x10, x13, #63 // x13 = 2*t[4](Ignore the overflowed part) + carry of(2*t[3])
extr x10, x11, x10, #63 // x10 = 2*t[5](Ignore the overflowed part) + carry of(2*t[4])
adcs x23, x6, x13 // x23 = lo(a[2] * a[2]) + 2*t[4]
adcs x24, x7, x10 // x24 = hi(a[2] * a[2]) + 2*t[5]
cbz x19, .LSqr8xReduceStart
ldp x13, x10, [x2, #24] // x13 = t[7], x10 = t[8]
extr x11, x12, x11, #63 // x11 = 2*t[6](Ignore the overflowed part) + carry of(2*t[5])
extr x12, x13, x12, #63 // x12 = 2*t[7](Ignore the overflowed part) + carry of(2*t[6])
adcs x21, x8, x11 // x21 = lo(a[3] * a[3]) + 2*t[6]
adcs x22, x9, x12 // x22 = hi(a[3] * a[3]) + 2*t[7]
stp x23, x24, [x2], #16 // t[4~7]re-push stack
stp x21, x22, [x2], #16
ldp x15, x17, [x1], #8*2 // x15 = a[4], x17 = a[5], x1 += 16 = &a[6]
ldp x11, x12, [x2, #8] // x11 = t[9], x12 = t[10]
mul x14 , x15 , x15 // x14 = lo(a[4] * a[4])
umulh x15 , x15 , x15 // x15 = hi(a[4] * a[4])
mul x16 , x17 , x17 // x16 = lo(a[5] * a[5])
umulh x17 , x17 , x17 // x17 = hi(a[5] * a[5])
extr x13, x10, x13, #63 // x13 = 2*t[8](Ignore the overflowed part) + carry of(2*t[7])
adcs x27, x14 , x13 // x27 = lo(a[4] * a[4]) + 2*t[8]
extr x10, x11, x10, #63 // x10 = 2*t[9](Ignore the overflowed part) + carry of(2*t[8])
adcs x28, x15 , x10 // x28 = hi(a[4] * a[4]) + 2*t[9]
extr x11, x12, x11, #63 // x11 = 2*t[10](Ignore the overflowed part) + carry of(2*t[9])
ldp x13, x10, [x2, #8*3] // Line 438 has obtained t[9] and t[10], x13 = &t[11], x10 = &t[12]
ldp x7, x9, [x1], #8*2 // x7 = a[6], x9 = a[7], x1 += 16 = &a[8]
b .LSqr8xDealSquare
.LSqr8xReduceStart:
extr x11, x12, x11, #63 // x11 = 2*t[2*size-2](Ignore the overflowed part) + carry of (2*t[2*size-3])
adcs x21, x8, x11 // x21 = lo(a[size-1] * a[size-1]) + 2*t[2*size-2]
extr x12, xzr, x12, #63 // x12 = 2*t[2*size-1](Ignore the overflowed part) + carry of (2*t[2*size-2])
adc x22, x9, x12 // x22 = hi(a[size-1] * a[size-1]) + 2*t[2*size-1]
ldp x1, x4, [x29, #104] // Pop n and k0 out of the stack, x1 = &n[0], x4 = k0
stp x23, x24, [x2] // t[2*size-4 ~ 2*size-1]re-push stack
stp x21, x22, [x2,#8*2]
cmp x5, #64 // if size == 8, we can goto Single step reduce
b.ne .LSqr8xReduceLoop
ldp x14 , x15 , [x1], #16 // x14~x9 = n[0~7]
ldp x16 , x17 , [x1], #16
ldp x6, x7, [x1], #16
ldp x8, x9, [x1], #16
ldp x27, x28, [sp] // x14~x9 = t[0~7]
ldp x25, x26, [sp,#8*2]
ldp x23, x24, [sp,#8*4]
ldp x21, x22, [sp,#8*6]
mov x19, #8
mov x2 , sp
// if size == 8, goto single reduce step
.LSqr8xSingleReduce:
mul x20, x4, x27
sub x19, x19, #1
//----- lo(n[1~7] * lo(t[0]*k0)) -----
mul x11, x15 , x20
mul x12, x16 , x20
mul x13, x17 , x20
mul x10, x6, x20
cmp x27, #1
adcs x27, x28, x11
adcs x28, x25, x12
adcs x25, x26, x13
adcs x26, x23, x10
mul x11, x7, x20
mul x12, x8, x20
mul x13, x9, x20
//----- hi(n[0~7] * lo(t[0]*k0)) -----
adcs x23, x24, x11
adcs x24, x21, x12
adcs x21, x22, x13
adc x22, xzr, xzr // x22 += CF
umulh x10, x14 , x20
umulh x11, x15 , x20
umulh x12, x16 , x20
umulh x13, x17 , x20
adds x27, x27, x10
adcs x28, x28, x11
adcs x25, x25, x12
adcs x26, x26, x13
umulh x10, x6, x20
umulh x11, x7, x20
umulh x12, x8, x20
umulh x13, x9, x20
adcs x23, x23, x10
adcs x24, x24, x11
adcs x21, x21, x12
adc x22, x22, x13
cbnz x19, .LSqr8xSingleReduce // Need cycle 8 times
ldp x10, x11, [x2, #64] // x10 = t[8], x11 = t[9]
ldp x12, x13, [x2, #80]
adds x27, x27, x10
adcs x28, x28, x11
ldp x10, x11, [x2, #96]
adcs x25, x25, x12
adcs x26, x26, x13
adcs x23, x23, x10
ldp x12, x13, [x2, #112]
adcs x24, x24, x11
adcs x21, x21, x12
adcs x22, x22, x13
adc x20, xzr, xzr
ldr x0, [x29, #96] // r Pop-Stack
// t - mod
subs x14, x27, x14
sbcs x15, x28, x15
sbcs x16, x25, x16
sbcs x17, x26, x17
sbcs x6, x23, x6
sbcs x7, x24, x7
sbcs x8, x21, x8
sbcs x9, x22, x9
sbcs x20, x20, xzr // determine whether there is a borrowing
// according to CF choose our result
csel x14 , x27, x14 , lo
csel x15 , x28, x15 , lo
csel x16 , x25, x16 , lo
csel x17 , x26, x17 , lo
stp x14 , x15 , [x0, #8*0]
csel x6, x23, x6, lo
csel x7, x24, x7, lo
stp x16 , x17 , [x0, #8*2]
csel x8, x21, x8, lo
csel x9, x22, x9, lo
stp x6, x7, [x0, #8*4]
stp x8, x9, [x0, #8*6]
b .LMontSqr8xEnd
.LSqr8xReduceLoop:
add x3, x1, x5 // x3 = &n[size]
mov x30, xzr
ldp x14 , x15 , [x1], #16 // x14~x9 = n[0~7]
ldp x16 , x17 , [x1], #16
ldp x6, x7, [x1], #16
ldp x8, x9, [x1], #16
ldp x27, x28, [sp] // x27 = t[0], x28 = t[1]
ldp x25, x26, [sp,#8*2] // x25~x22 = t[2~7]
ldp x23, x24, [sp,#8*4]
ldp x21, x22, [sp,#8*6]
mov x19, #8
mov x2 , sp
.LSqr8xReduceProcess:
mul x20, x4, x27 // x20 = lo(k0 * t[0])
sub x19, x19, #1
//----- lo(n[1~7] * lo(t[0]*k0)) -----
mul x11, x15, x20 // x11 = n[1] * lo(t[0]*k0)
mul x12, x16, x20 // x12 = n[2] * lo(t[0]*k0)
mul x13, x17, x20 // x13 = n[3] * lo(t[0]*k0)
mul x10, x6, x20 // x10 = n[4] * lo(t[0]*k0)
str x20, [x2], #8 // Push lo(t[0]*k0) on the stack., x2 += 8
cmp x27, #1 // Check whether the low location is carried.
adcs x27, x28, x11 // x27 = t[1] + lo(n[1] * lo(t[0]*k0))
adcs x28, x25, x12 // x28 = t[2] + lo(n[2] * lo(t[0]*k0))
adcs x25, x26, x13 // x25 = t[3] + lo(n[3] * lo(t[0]*k0))
adcs x26, x23, x10 // x26 = t[4] + lo(n[4] * lo(t[0]*k0))
mul x11, x7, x20
mul x12, x8, x20
mul x13, x9, x20
//----- hi(n[0~7] * lo(t[0]*k0)) -----
adcs x23, x24, x11 // x23 = t[5] + lo(n[5] * lo(t[0]*k0))
adcs x24, x21, x12 // x24 = t[6] + lo(n[6] * lo(t[0]*k0))
adcs x21, x22, x13 // x21 = t[7] + lo(n[7] * lo(t[0]*k0))
adc x22, xzr, xzr // x22 += CF
umulh x10, x14 , x20
umulh x11, x15 , x20
umulh x12, x16 , x20
umulh x13, x17 , x20
adds x27, x27, x10 // x27 += hi(n[0] * lo(t[0]*k0))
adcs x28, x28, x11 // x28 += hi(n[1] * lo(t[0]*k0))
adcs x25, x25, x12 // x25 += hi(n[2] * lo(t[0]*k0))
adcs x26, x26, x13 // x26 += hi(n[3] * lo(t[0]*k0))
umulh x10, x6, x20
umulh x11, x7, x20
umulh x12, x8, x20
umulh x13, x9, x20
adcs x23, x23, x10 // x23 += hi(n[4] * lo(t[0]*k0))
adcs x24, x24, x11 // x24 += hi(n[5] * lo(t[0]*k0))
adcs x21, x21, x12 // x21 += hi(n[6] * lo(t[0]*k0))
adc x22, x22, x13 // x22 += hi(n[7] * lo(t[0]*k0))
cbnz x19, .LSqr8xReduceProcess // Cycle 8 times, and at the end of the cycle, x2 += 8*8
ldp x10, x11, [x2, #8*0] // x10 = t[8], x11 = t[9]
ldp x12, x13, [x2, #8*2]
mov x0, x2
adds x27, x27, x10
adcs x28, x28, x11
ldp x10, x11, [x2,#8*4]
adcs x25, x25, x12
adcs x26, x26, x13
ldp x12, x13, [x2,#8*6]
adcs x23, x23, x10
adcs x24, x24, x11
adcs x21, x21, x12
adcs x22, x22, x13
ldr x4 , [x2, #-8*8] // x4 = t[0]
ldp x14 , x15 , [x1], #16 // x14~x9 = &n[8]~&n[15]
ldp x16 , x17 , [x1], #16
ldp x6, x7, [x1], #16
ldp x8, x9, [x1], #16
mov x19, #-8*8
.LSqr8xReduce:
adc x20, xzr, xzr // x20 = CF
add x19, x19, #8
mul x10, x14 , x4
mul x11, x15 , x4
mul x12, x16 , x4
mul x13, x17 , x4
adds x27, x27, x10
adcs x28, x28, x11
adcs x25, x25, x12
adcs x26, x26, x13
mul x10, x6, x4
mul x11, x7, x4
mul x12, x8, x4
mul x13, x9, x4
adcs x23, x23, x10
adcs x24, x24, x11
adcs x21, x21, x12
adcs x22, x22, x13
umulh x10, x14 , x4
umulh x11, x15 , x4
umulh x12, x16 , x4
umulh x13, x17 , x4
adc x20, x20, xzr
str x27, [x2], #8 // x27 = t[8], x2 += 8
adds x27, x28, x10 // x27 = t[1] + lo(n[1] * lo(t[0]*k0))
adcs x28, x25, x11 // x28 = t[2] + lo(n[2] * lo(t[0]*k0))
adcs x25, x26, x12 // x25 = t[3] + lo(n[3] * lo(t[0]*k0))
adcs x26, x23, x13 // x26 = t[4] + lo(n[4] * lo(t[0]*k0))
umulh x10, x6, x4
umulh x11, x7, x4
umulh x12, x8, x4
umulh x13, x9, x4
// x0 = &t[8]
ldr x4 , [x0, x19]
adcs x23, x24, x10
adcs x24, x21, x11
adcs x21, x22, x12
adcs x22, x20, x13
cbnz x19, .LSqr8xReduce
ldp x14 , x15 , [x2, #8*0]
ldp x16 , x17 , [x2, #8*2]
sub x19, x3, x1 // x19 = (size-16)*8
ldp x6, x7, [x2, #8*4]
ldp x8, x9, [x2, #8*6]
cbz x19, .LSqr8xReduceBreak
ldr x4 , [x0, #-8*8]
adds x27, x27, x14
adcs x28, x28, x15
adcs x25, x25, x16
adcs x26, x26, x17
adcs x23, x23, x6
adcs x24, x24, x7
adcs x21, x21, x8
adcs x22, x22, x9
ldp x14 , x15 , [x1], #16
ldp x16 , x17 , [x1], #16
ldp x6, x7, [x1], #16
ldp x8, x9, [x1], #16
mov x19, #-8*8
b .LSqr8xReduce
.align 4
.LSqr8xReduceBreak:
sub x12, x3, x5 // x12 = n, reassign to n
ldr x4 , [x29, #112] // k0 pop-stack
cmp x30, #1 // Check whether the low location is carried.
adcs x10, x27, x14
adcs x11, x28, x15
stp x10, x11, [x2] , #16
ldp x27 ,x28, [x0 , #8*0]
ldp x14 , x15, [x12], #16 // x12 = &n[0] (Line 638 assigns a value)
adcs x25, x25, x16
adcs x26, x26, x17
adcs x23, x23, x6
adcs x24, x24, x7
adcs x21, x21, x8
adcs x22, x22, x9
adc x30, xzr, xzr
ldp x16, x17, [x12], #16
ldp x6, x7, [x12], #16
ldp x8, x9, [x12], #16
stp x25, x26, [x2], #16
ldp x25, x26, [x0, #8*2]
stp x23, x24, [x2], #16
ldp x23, x24, [x0, #8*4]
stp x21, x22, [x2], #16
ldp x21, x22, [x0, #8*6]
sub x20, x2, x29 // Check whether the loop ends
mov x1, x12
mov x2, x0 // sliding window
mov x19, #8
cbnz x20, .LSqr8xReduceProcess
// Final step
ldr x0 , [x29, #96] // r Pop-Stack
add x2 , x2 , #8*8
subs x10, x27, x14
sbcs x11, x28, x15
sub x19, x5 , #8*8
mov x3 , x0 // backup x0
.LSqr8xSubMod:
ldp x14 , x15, [x1], #16
sbcs x12, x25, x16
sbcs x13, x26, x17
ldp x16 , x17 , [x1], #16
stp x10, x11, [x0], #16
stp x12, x13, [x0], #16
sbcs x10, x23, x6
sbcs x11, x24, x7
ldp x6, x7, [x1], #16
sbcs x12, x21, x8
sbcs x13, x22, x9
ldp x8, x9, [x1], #16
stp x10, x11, [x0], #16
stp x12, x13, [x0], #16
ldp x27, x28, [x2], #16
ldp x25, x26, [x2], #16
ldp x23, x24, [x2], #16
ldp x21, x22, [x2], #16
sub x19, x19, #8*8
sbcs x10, x27, x14
sbcs x11, x28, x15
cbnz x19, .LSqr8xSubMod
mov x2 , sp
add x1 , sp , x5
sbcs x12, x25, x16
sbcs x13, x26, x17
stp x12, x13, [x0, #8*2]
stp x10, x11, [x0, #8*0]
sbcs x10, x23, x6
sbcs x11, x24, x7
stp x10, x11, [x0, #8*4]
sbcs x12, x21, x8
sbcs x13, x22, x9
stp x12, x13, [x0, #8*6]
sbcs xzr, x30, xzr // Determine whether there is a borrowing
.LSqr8xCopy:
ldp x14, x15, [x3, #8*0]
ldp x16, x17, [x3, #8*2]
ldp x6, x7, [x3, #8*4]
ldp x8, x9, [x3, #8*6]
ldp x27, x28, [x1], #16
ldp x25, x26, [x1], #16
ldp x23, x24, [x1], #16
ldp x21, x22, [x1], #16
sub x5, x5, #8*8
csel x10, x27, x14, lo // Condition selection instruction, lo = less than,
// equivalent to x14 = (conf==lo) ? x27 : x14
csel x11, x28, x15, lo
csel x12, x25, x16 , lo
csel x13, x26, x17, lo
csel x14, x23, x6, lo
csel x15, x24, x7, lo
csel x16, x21, x8, lo
csel x17, x22, x9, lo
stp x10, x11, [x3], #16
stp x12, x13, [x3], #16
stp x14, x15, [x3], #16
stp x16, x17, [x3], #16
cbnz x5, .LSqr8xCopy
.LMontSqr8xEnd:
ldr x30, [x29, #8] // Pop-Stack
ldp x27, x28, [x29, #16]
mov sp , x29
ldp x25, x26, [x29, #32]
mov x0 , #1
ldp x23, x24, [x29, #48]
ldp x21, x22, [x29, #64]
ldp x19, x20, [x29, #80] // x19 = [x29 + 80], x20 = [x29 + 80 + 8],
// ldp reads two 8-byte memory blocks at a time.
ldr x29, [sp], #128 // x29 = [sp], sp = sp + 128,ldr reads only an 8-byte block of memory
AARCH64_AUTIASP
ret
.size MontSqr8, .-MontSqr8
// MontMul4: Montgomery modular multiplication, processed 4 limbs at a time.
// Register convention (as used by the dispatch/prologue code in this file):
//   x0 = r (result), x1 = a, x2 = b, x3 = n (modulus),
//   x4 = k0 (Montgomery constant), x5 = size in limbs (multiple of 4).
// NOTE(review): roles inferred from usage in this file — confirm against the
// C-side prototype. Scratch t[] lives in a freshly carved stack area of
// (size + 4) limbs.
.type MontMul4, %function
MontMul4:
AARCH64_PACIASP
stp x29, x30, [sp, #-128]! // Prologue: save fp/lr and callee-saved registers.
mov x29, sp
stp x27, x28, [sp, #16]
stp x25, x26, [sp, #32]
stp x23, x24, [sp, #48]
stp x21, x22, [sp, #64]
stp x19, x20, [sp, #80]
mov x27, xzr // t[0~3] accumulator registers start at zero.
mov x28, xzr
mov x25, xzr
mov x26, xzr
mov x30, xzr // x30: running top-word carry across outer iterations.
lsl x5 , x5 , #3 // x5 = size * 8 (bytes).
sub x22, sp , x5
sub sp , x22, #8*4 // Reserve stack space for (size + 4) limbs.
mov x22, sp
sub x6, x5, #32
cbnz x6, .LMul4xProcesStart // size != 4: take the general path.
ldp x14 , x15 , [x1, #8*0]
ldp x16 , x17 , [x1, #8*2] // x14~x17 = a[0~3]
ldp x10, x11, [x3] // x10~x13 = n[0~3]
ldp x12, x13, [x3, #8*2]
mov x1 , xzr // x1: carry word for the single-step path.
mov x20, #4 // Loop counter: 4 words of b.
// size == 4: dedicated single-step path, everything stays in registers.
.LMul4xSingleStep:
sub x20, x20, #0x1
ldr x24, [x2], #8 // x24 = b[i]
//----- lo(a[0~3] * b[i]) -----
mul x6, x14 , x24
mul x7, x15 , x24
mul x8, x16 , x24
mul x9, x17 , x24
//----- hi(a[0~3] * b[i]) -----
adds x27, x27, x6 // t[0] += lo(a[0] * b[i])
umulh x6, x14 , x24
adcs x28, x28, x7 // t[1] += lo(a[1] * b[i])
umulh x7, x15 , x24
adcs x25, x25, x8 // t[2] += lo(a[2] * b[i])
umulh x8, x16 , x24
adcs x26, x26, x9 // t[3] += lo(a[3] * b[i])
umulh x9, x17 , x24
mul x21, x27, x4 // x21 = t[0] * k0 (Montgomery quotient for this round).
adc x23, xzr, xzr // t[4] = CF
//----- lo(n[0~3] * t[0]*k0) -----
adds x28, x28, x6 // t[1] += hi(a[0] * b[i])
adcs x25, x25, x7 // t[2] += hi(a[1] * b[i])
mul x7, x11, x21
adcs x26, x26, x8 // t[3] += hi(a[2] * b[i])
mul x8, x12, x21
adc x23, x23, x9 // t[4] += hi(a[3] * b[i])
mul x9, x13, x21
cmp x27, #1 // Sets CF iff x27 != 0: exactly the carry of t[0] + lo(n[0]*x21),
// whose low 64 bits are 0 by construction of k0.
adcs x27, x28, x7 // t[1] + lo(n[1]*x21): shift right one limb (divide by r = 2^64).
//----- hi(n[0~3] *t[0]*k0) -----
umulh x6, x10, x21
umulh x7, x11, x21
adcs x28, x25, x8 // t[2] + lo(n[2]*x21)
umulh x8, x12, x21
adcs x25, x26, x9 // t[3] + lo(n[3]*x21)
umulh x9, x13, x21
adcs x26, x23, x1 // t[4] + carry word of the previous round
adc x1 , xzr, xzr
adds x27, x27, x6 // Fold hi(n[0~3] * x21) into t[0~3].
adcs x28, x28, x7
adcs x25, x25, x8
adcs x26, x26, x9
adc x1 , x1 , xzr
cbnz x20, .LMul4xSingleStep
subs x14 , x27, x10 // Conditional subtraction: compute t - n ...
sbcs x15 , x28, x11
sbcs x16 , x25, x12
sbcs x17 , x26, x13
sbcs xzr, x1 , xzr // ... including the top carry, to detect a borrow.
csel x14 , x27, x14 , lo // lo = borrow (t < n): keep t, otherwise keep t - n.
csel x15 , x28, x15 , lo
csel x16 , x25, x16 , lo
csel x17 , x26, x17 , lo
stp x14 , x15 , [x0, #8*0]
stp x16 , x17 , [x0, #8*2]
b .LMontMul4xEnd
.LMul4xProcesStart:
add x6, x5, #32
eor v0.16b,v0.16b,v0.16b // v0/v1 = 0: used to zero-fill the t[] scratch area.
eor v1.16b,v1.16b,v1.16b
.LMul4xInitstack:
sub x6, x6, #32
st1 {v0.2d, v1.2d}, [x22], #32 // Clear (size + 4) limbs, 32 bytes per step.
cbnz x6, .LMul4xInitstack
mov x22, sp
add x6, x2 , x5 // x6 = &b[size]
adds x19, x1 , x5 // x19 = &a[size]
stp x0 , x6, [x29, #96] // Spill r and &b[size] for later retrieval.
mov x0 , xzr
mov x20, #0
.LMul4xLoopProces:
ldr x24, [x2] // x24 = b[0] of the current 4-word window of b.
ldp x14 , x15 , [x1], #16
ldp x16 , x17 , [x1], #16 // x14~x17 = a[0~3]
ldp x10 , x11 , [x3], #16
ldp x12 , x13 , [x3], #16 // x10~x13 = n[0~3]
.LMul4xPrepare:
//----- lo(a[0~3] * b[i]) -----
adc x0 , x0 , xzr // x0 += CF (carry out of the previous round's top word).
add x20, x20, #8
and x20, x20, #31 // x20 &= 0x1f: offset cycles 8,16,24,0 — wraps after 4 words.
mul x6, x14 , x24
mul x7, x15 , x24
mul x8, x16 , x24
mul x9, x17 , x24
//----- hi(a[0~3] * b[i]) -----
adds x27, x27, x6 // t[0] += lo(a[0] * b[i])
adcs x28, x28, x7 // t[1] += lo(a[1] * b[i])
adcs x25, x25, x8 // t[2] += lo(a[2] * b[i])
adcs x26, x26, x9 // t[3] += lo(a[3] * b[i])
umulh x6, x14 , x24 // x6 = hi(a[0] * b[i])
umulh x7, x15 , x24
umulh x8, x16 , x24
umulh x9, x17 , x24
mul x21, x27, x4 // x21 = t[0] * k0; recomputed every round
// because t[0] differs in every round.
adc x23, xzr, xzr // t[4] += CF (set by t[3] += lo(a[3] * b[i]))
ldr x24, [x2, x20] // Prefetch the next b word.
str x21, [x22], #8 // Push t[0]*k0 for the later reduction pass, x22 += 8.
//----- lo(n[0~3] * t[0]*k0) -----
adds x28, x28, x6 // t[1] += hi(a[0] * b[i])
adcs x25, x25, x7 // t[2] += hi(a[1] * b[i])
adcs x26, x26, x8 // t[3] += hi(a[2] * b[i])
adc x23, x23, x9 // t[4] += hi(a[3] * b[i])
mul x7, x11, x21 // x7 = lo(n[1] * t[0]*k0)
mul x8, x12, x21 // x8 = lo(n[2] * t[0]*k0)
mul x9, x13, x21 // x9 = lo(n[3] * t[0]*k0)
cmp x27, #1 // CF = (x27 != 0): the carry of t[0] + lo(n[0]*x21),
// whose low 64 bits are 0 by construction.
adcs x27, x28, x7 // x27 = t[1] + lo(n[1] * t[0]*k0):
// the divide-by-r limb shift of Montgomery reduction (r = 2^64).
//----- hi(n[0~3] *t[0]*k0) -----
adcs x28, x25, x8 // x28 = t[2] + lo(n[2] * t[0]*k0)
adcs x25, x26, x9 // x25 = t[3] + lo(n[3] * t[0]*k0)
adcs x26, x23, x0 // x26 = t[4] + saved carry + CF
adc x0 , xzr, xzr // x0 = CF
umulh x6, x10, x21 // x6 = hi(n[0] * t[0]*k0)
umulh x7, x11, x21 // x7 = hi(n[1] * t[0]*k0)
umulh x8, x12, x21 // x8 = hi(n[2] * t[0]*k0)
umulh x9, x13, x21 // x9 = hi(n[3] * t[0]*k0)
adds x27, x27, x6 // x27 = t[1] + hi(n[0] * t[0]*k0)
adcs x28, x28, x7 // x28 = t[2] + hi(n[1] * t[0]*k0)
adcs x25, x25, x8 // x25 = t[3] + hi(n[2] * t[0]*k0)
adcs x26, x26, x9 // x26 = t[4] + hi(n[3] * t[0]*k0)
cbnz x20, .LMul4xPrepare
// Four t[0]*k0 values were stacked during this loop.
adc x0 , x0 , xzr
ldp x6, x7, [x22, #8*4] // Load partial sums t[4~7] produced by earlier passes.
ldp x8, x9, [x22, #8*6]
adds x27, x27, x6
adcs x28, x28, x7
adcs x25, x25, x8
adcs x26, x26, x9
ldr x21, [sp] // x21 = first stacked t[0] * k0
.LMul4xReduceBegin:
ldp x14 , x15 , [x1], #16
ldp x16 , x17 , [x1], #16 // x14~x17 = next window a[4~7]
ldp x10 , x11 , [x3], #16
ldp x12 , x13 , [x3], #16 // x10~x13 = next window n[4~7]
.LMul4xReduceProces:
adc x0 , x0 , xzr
add x20, x20, #8
and x20, x20, #31 // Offset cycles 8,16,24,0 as above.
//----- lo(a[4~7] * b[i]) -----
mul x6, x14 , x24
mul x7, x15 , x24
mul x8, x16 , x24
mul x9, x17 , x24
//----- hi(a[4~7] * b[i]) -----
adds x27, x27, x6 // x27 += lo(a[4~7] * b[i])
adcs x28, x28, x7
adcs x25, x25, x8
adcs x26, x26, x9
adc x23, xzr, xzr
umulh x6, x14 , x24
umulh x7, x15 , x24
umulh x8, x16 , x24
umulh x9, x17 , x24
ldr x24, [x2, x20] // Next b word.
//----- lo(n[4~7] * t[0]*k0) -----
adds x28, x28, x6
adcs x25, x25, x7
adcs x26, x26, x8
adc x23, x23, x9
mul x6, x10, x21
mul x7, x11, x21
mul x8, x12, x21
mul x9, x13, x21
//----- hi(n[4~7] * t[0]*k0) -----
adds x27, x27, x6
adcs x28, x28, x7
adcs x25, x25, x8
adcs x26, x26, x9
adcs x23, x23, x0
umulh x6, x10, x21
umulh x7, x11, x21
umulh x8, x12, x21
umulh x9, x13, x21
ldr x21, [sp, x20] // Next stacked t[0]*k0.
adc x0 , xzr, xzr // x0 = CF: record the carry.
str x27, [x22], #8 // s[i] is final; write it out, x22 += 8.
adds x27, x28, x6
adcs x28, x25, x7
adcs x25, x26, x8
adcs x26, x23, x9
cbnz x20, .LMul4xReduceProces
sub x6, x19, x1 // x6 = &a[size] - &a[i]: remaining inner-loop distance
// (x1 advances each pass); zero means this b window is done.
adc x0 , x0 , xzr
cbz x6, .LMul4xLoopExitCheck
ldp x6, x7, [x22, #8*4] // t[4~7]
ldp x8, x9, [x22, #8*6]
adds x27, x27, x6
adcs x28, x28, x7
adcs x25, x25, x8
adcs x26, x26, x9
b .LMul4xReduceBegin
.LMul4xLoopExitCheck:
ldr x9, [x29, #104] // Reload &b[size] from the spill slot.
add x2 , x2 , #8*4 // Advance b by 4 words each outer pass: &b[4], &b[8], ...
sub x9 , x9, x2 // Zero when the outer loop over b is finished.
adds x27, x27, x30
adcs x28, x28, xzr
stp x27, x28, [x22, #8*0] // Store finished top words x27, x28.
ldp x27, x28, [sp , #8*4] // Reload t[0], t[1] for the next outer pass.
adcs x25, x25, xzr
adcs x26, x26, xzr
stp x25, x26, [x22, #8*2] // Store finished top words x25, x26.
ldp x25, x26, [sp , #8*6] // Reload t[2], t[3].
adc x30, x0 , xzr // Carry word for the next outer pass.
sub x3 , x3 , x5 // x3 = &n[0] (rewind the modulus pointer).
cbz x9, .LMul4xEnd
sub x1 , x1 , x5 // x1 = &a[0]
mov x22, sp
mov x0 , xzr
b .LMul4xLoopProces
.LMul4xEnd:
ldp x10, x11, [x3], #16
ldp x12, x13, [x3], #16
ldr x0, [x29, #96] // Reload r from the spill slot.
mov x19, x0 // backup, x19 = &r[0]
subs x6, x27, x10 // t[0] - n[0], sets flags
sbcs x7, x28, x11 // t[1] - n[1] - borrow
add x22, sp , #8*8 // x22 = &S[8]
sub x20, x5 , #8*4 // x20 = (size - 4)*8
// Final conditional subtraction: t - n, x22 = &t[8], x3 = &n[0].
.LMul4xSubMod:
sbcs x8, x25, x12
sbcs x9, x26, x13
ldp x10, x11, [x3], #16
ldp x12, x13, [x3], #16
ldp x27, x28, [x22], #16
ldp x25, x26, [x22], #16
sub x20, x20, #8*4
stp x6, x7, [x0], 16
stp x8, x9, [x0], 16
sbcs x6, x27, x10
sbcs x7, x28, x11
cbnz x20, .LMul4xSubMod
sbcs x8, x25, x12
sbcs x9, x26, x13
sbcs xzr, x30, xzr // Include the saved top carry x30 in the borrow test.
add x1 , sp , #8*4 // x1 = sp + 8*4 = &S[4]; scratch is (size + 4) limbs.
stp x6, x7, [x0]
stp x8, x9, [x0, #8*2]
.LMul4xCopy:
ldp x14 , x15 , [x19] // x14~x17 = r[0~3] (the t - n candidate just stored)
ldp x16 , x17 , [x19, #8*2]
ldp x27, x28, [x1], #16 // x27~x26 = S[4~7] (the unsubtracted t)
ldp x25, x26, [x1], #16
sub x5, x5, #8*4
csel x6, x27, x14, lo // lo = borrow occurred (t < n): keep t, else keep t - n.
csel x7, x28, x15, lo
csel x8, x25, x16, lo
csel x9, x26, x17, lo
stp x6, x7, [x19], #16
stp x8, x9, [x19], #16
cbnz x5, .LMul4xCopy
.LMontMul4xEnd:
ldr x30, [x29, #8] // Epilogue: restore lr and the callee-saved registers.
ldp x27, x28, [x29, #16]
ldp x25, x26, [x29, #32]
ldp x23, x24, [x29, #48]
ldp x21, x22, [x29, #64]
ldp x19, x20, [x29, #80]
mov sp , x29
ldr x29, [sp], #128
AARCH64_AUTIASP
ret
.size MontMul4, .-MontMul4
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/asm/bn_mont_armv8.S | Unix Assembly | unknown | 51,548 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
.file "bn_mont_x86_64.S"
.text
// ADD_CARRY: add \a into \b, then fold the resulting carry into %rdx.
// NOTE(review): callers appear to invoke this right after mulq, so %rdx holds
// the pending high word that the carry belongs to — confirm at each use site.
// Clobbers flags.
.macro ADD_CARRY a b
addq \a,\b
adcq $0,%rdx
.endm
// SAVE_REGISTERS: push the callee-saved GPRs used by these routines.
// The push order fixes the stack layout and must be mirrored exactly, in
// reverse, by RESTORE_REGISTERS.
.macro SAVE_REGISTERS
pushq %r15 // Save non-volatile register.
pushq %r14
pushq %r13
pushq %r12
pushq %rbp
pushq %rbx
.endm
// RESTORE_REGISTERS: pop the callee-saved GPRs in the exact reverse of the
// order SAVE_REGISTERS pushed them.
.macro RESTORE_REGISTERS
popq %rbx // Restore non-volatile register.
popq %rbp
popq %r12
popq %r13
popq %r14
popq %r15
.endm
/*
 * Montgomery multiplication entry point / dispatcher.
 * void MontMul_Asm(uint64_t *r, const uint64_t *a, const uint64_t *b,
 * const uint64_t *n, const uint64_t k0, uint32_t size);
 * System V registers: rdi = r, rsi = a, rdx = b, rcx = n, r8 = k0, r9d = size.
 * Dispatches to MontSqr8 (a == b, size % 8 == 0), MontMul4 (size % 4 == 0,
 * size >= 8), or falls through to the generic word-by-word loop below.
 * Returns 1 in %rax. The temporary buffer t[] lives on the stack and is
 * wiped before returning.
 */
.globl MontMul_Asm
.type MontMul_Asm,@function
.align 16
MontMul_Asm:
.cfi_startproc
testl $3,%r9d
jnz .LMontMul // If size is not divisible by 4, use the generic path.
cmpl $8,%r9d
jb .LMontMul // size < 8: generic path.
cmpq %rsi,%rdx
jne MontMul4 // if a != b, use the 4x-unrolled multiplication kernel.
testl $7,%r9d
jz MontSqr8 // a == b and size divisible by 8: squaring kernel.
jmp MontMul4
.align 16
.LMontMul:
SAVE_REGISTERS // Save non-volatile registers.
movq %rsp,%rax // rax keeps the original rsp for later restore.
movq %r9, %r15
negq %r15 // r15 = -size
leaq -16(%rsp, %r15, 8), %r15 // r15 = rsp - size * 8 - 16
andq $-1024, %r15 // Align r15 down to 1 KB.
movq %rsp, %r14 // r14 = rsp
subq %r15,%r14 // __chkstk-style probing: needed when the stack grows by
 // more than one page (4096 bytes) at once.
andq $-4096,%r14 // Align r14 down to 4 KB.
leaq (%r15,%r14),%rsp // rsp = r15 + r14
cmpq %r15,%rsp // If more than one page must be probed, go to .LoopPage.
ja .LoopPage
jmp .LMulBody
.align 16
.LoopPage:
leaq -4096(%rsp),%rsp // Touch one page at a time: rsp -= 4096 until rsp <= r15.
cmpq %r15,%rsp
ja .LoopPage
.LMulBody:
movq %rax,8(%rsp,%r9,8) // Save the original rsp on the stack (slot t[size + 1]).
movq %rdx,%r13 // r13 = b
xorq %r11,%r11 // r11 = i = 0
xorq %r10,%r10 // r10 = j = 0
movq (%r13),%rbx // rbx = b[0]
movq (%rsi),%rax // rax = a[0]
mulq %rbx // (rdx, rax) = a[0] * b[0]
movq %rax,%r15 // r15 = t[0] = lo(a[0] * b[0])
movq %rdx,%r14 // r14 = hi(a[0] * b[0])
movq %r8,%rbp // rbp = k0
imulq %r15,%rbp // rbp = q = t[0] * k0 (Montgomery quotient, mod 2^64)
movq (%rcx),%rax // rax = n[0]
mulq %rbp // (rdx, rax) = q * n[0]
ADD_CARRY %rax,%r15 // r15 = t[0] + lo(q * n[0]); low word cancels to 0.
leaq 1(%r10),%r10 // j++
.Loop1st:
movq (%rsi,%r10,8),%rax // rax = a[j]
movq %rdx,%r12 // r12 = hi(q * n[j - 1])
mulq %rbx // (rdx, rax) = a[j] * b[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[j - 1] * b[0]) + lo(a[j] * b[0])
movq %rdx,%r15 // r15 = hi(a[j] * b[0])
movq (%rcx,%r10,8),%rax // rax = n[j]
mulq %rbp // (rdx, rax) = q * n[j]
leaq 1(%r10),%r10 // j++
cmpq %r9,%r10 // if j != size, keep looping .Loop1st
je .Loop1stSkip
ADD_CARRY %rax,%r12 // r12 = hi(q * n[j - 1]) + lo(q * n[j])
ADD_CARRY %r14,%r12 // r12 += hi(a[j - 1] * b[0]) + lo(a[j] * b[0])
movq %r12,-16(%rsp,%r10,8) // t[j - 2] = r12
movq %r15,%r14 // r14 = hi(a[j] * b[0])
jmp .Loop1st
.Loop1stSkip:
ADD_CARRY %rax,%r12 // r12 = hi(q * n[j - 1]) + lo(q * n[j])
ADD_CARRY %r14,%r12 // r12 += hi(a[j - 1] * b[0]) + lo(a[j] * b[0])
movq %r12,-16(%rsp,%r10,8) // t[j - 2] = r12
movq %r15,%r14 // r14 = hi(a[j] * b[0])
movq %rdx,%r12 // r12 = hi(q * n[j])
xorq %rdx,%rdx // rdx = 0, clears CF.
ADD_CARRY %r14,%r12 // r12 = hi(q * n[j]) + hi(a[j] * b[0])
movq %r12,-8(%rsp,%r9,8) // t[size - 1] = r12.
movq %rdx,(%rsp,%r9,8) // t[size] = overflow carry.
leaq 1(%r11),%r11 // i++
.align 16
.LoopOuter:
xorq %r10,%r10 // j = 0
movq (%rsi),%rax // rax = a[0]
movq (%r13,%r11,8),%rbx // rbx = b[i]
mulq %rbx // (rdx, rax) = a[0] * b[i]
movq (%rsp),%r15 // r15 = t[0]
ADD_CARRY %rax,%r15 // r15 = lo(a[0] * b[i]) + t[0]
movq %rdx,%r14 // r14 = hi(a[0] * b[i])
movq %r8,%rbp // rbp = k0
imulq %r15,%rbp // rbp = q = t[0] * k0
movq (%rcx),%rax // rax = n[0]
mulq %rbp // (rdx, rax) = q * n[0]
ADD_CARRY %rax,%r15 // r15 = t[0] + lo(q * n[0]); low word cancels.
leaq 1(%r10),%r10 // j++
.align 16
.LoopInner:
movq (%rsi,%r10,8),%rax // rax = a[j]
movq %rdx,%r12 // r12 = hi(q * n[j - 1])
movq (%rsp,%r10,8),%r15 // r15 = t[j]
mulq %rbx // (rdx, rax) = a[j] * b[i]
ADD_CARRY %rax,%r14 // r14 = hi(a[j - 1] * b[i]) + lo(a[j] * b[i])
movq (%rcx,%r10,8),%rax // rax = n[j]
ADD_CARRY %r14,%r15 // r15 = a[j] * b[i] + t[j]
movq %rdx,%r14
leaq 1(%r10),%r10 // j++
mulq %rbp // (rdx, rax) = q * n[j]
cmpq %r9,%r10 // if j != size, keep looping .LoopInner
je .LoopInnerSkip
ADD_CARRY %rax,%r12 // r12 += lo(q * n[j])
ADD_CARRY %r15,%r12 // r12 = a[j] * b[i] + t[j] + q * n[j]
movq %r12,-16(%rsp,%r10,8) // t[j - 2] = r12
jmp .LoopInner
.LoopInnerSkip:
ADD_CARRY %rax,%r12 // r12 += lo(q * n[j])
ADD_CARRY %r15,%r12 // r12 = q * n[j] + a[j] * b[i] + t[j]
movq (%rsp,%r10,8),%r15 // r15 = t[size] (saved overflow word)
movq %r12,-16(%rsp,%r10,8) // t[j - 2] = r12
movq %rdx,%r12 // r12 = hi(q * n[j])
xorq %rdx,%rdx // rdx = 0
ADD_CARRY %r14,%r12 // r12 = hi(a[j] * b[i]) + hi(q * n[j])
ADD_CARRY %r15,%r12 // r12 += t[size]
movq %r12,-8(%rsp,%r9,8) // t[size - 1] = r12
movq %rdx,(%rsp,%r9,8) // t[size] = CF
leaq 1(%r11),%r11 // i++
cmpq %r9,%r11 // repeat while i < size (unsigned)
jne .LoopOuter
xorq %r11,%r11 // r11 = 0; also clears CF for the subtract loop.
movq (%rsp),%rax // rax = t[0]
movq %r9,%r10 // r10 = size (loop counter)
.align 16
.LoopSub: // Compute r = t - n with borrow chain.
sbbq (%rcx,%r11,8),%rax // r[i] = t[i] - n[i] - borrow
movq %rax,(%rdi,%r11,8)
movq 8(%rsp,%r11,8),%rax // rax = t[i + 1]
leaq 1(%r11),%r11 // i++
decq %r10 // j--
jnz .LoopSub // while j != 0
sbbq $0,%rax // Subtract final borrow from the top overflow word.
movq $-1,%rbx
xorq %rax,%rbx // rbx = ~mask; rax = mask (all-ones if t < n).
xorq %r11,%r11 // i = 0
movq %r9,%r10 // r10 = size
.LoopCopy: // Constant-time select: r = (t < n) ? t : t - n.
movq (%rdi,%r11,8),%rcx // rcx = (t - n)[i] & ~mask
andq %rbx,%rcx
movq (%rsp,%r11,8),%rdx // rdx = t[i] & mask
andq %rax,%rdx
orq %rcx,%rdx
movq %rdx,(%rdi,%r11,8) // r[i] = selected word.
movq %r9,(%rsp,%r11,8) // Scrub the temporary: t[i] = size.
leaq 1(%r11),%r11 // i++
subq $1,%r10 // j--
jnz .LoopCopy // while j != 0
movq 8(%rsp,%r9,8),%rsi // rsi = previously saved rsp.
movq $1,%rax // Return value 1.
leaq (%rsi),%rsp // Restore rsp.
RESTORE_REGISTERS // Restore non-volatile registers.
ret
.cfi_endproc
.size MontMul_Asm,.-MontMul_Asm
/*
 * 4x-unrolled Montgomery multiplication kernel; same C signature and
 * register assignments as MontMul_Asm (rdi = r, rsi = a, rdx = b, rcx = n,
 * r8 = k0, r9 = size). Requires size % 4 == 0 and size >= 8. Temporary
 * t[] lives on the stack; the original rsp and the result pointer r are
 * stashed above t[] at t[size + 1] and t[size + 2].
 */
.type MontMul4,@function
.align 16
MontMul4:
.cfi_startproc
SAVE_REGISTERS
movq %rsp,%rax // save rsp
movq %r9,%r15
negq %r15
leaq -32(%rsp,%r15,8),%r15 // Allocate space: size * 8 + 32 bytes.
andq $-1024,%r15
movq %rsp,%r14
subq %r15,%r14 // __chkstk-style probing, needed when the stack grows past 4096.
andq $-4096,%r14
leaq (%r15,%r14),%rsp
cmpq %r15,%rsp // If more than one page must be probed, go to .LoopPage4x.
ja .LoopPage4x
jmp .LoopMul4x
.LoopPage4x:
leaq -4096(%rsp),%rsp // rsp -= 4096 each time until rsp <= r15.
cmpq %r15,%rsp
ja .LoopPage4x
.LoopMul4x:
xorq %r11,%r11 // i = 0
xorq %r10,%r10 // j = 0
movq %rax,8(%rsp,%r9,8) // t[size + 1] = original rsp.
movq %rdi,16(%rsp,%r9,8) // t[size + 2] = r
movq %rdx,%r13 // r13 = b
movq (%rsi),%rax // rax = a[0]
movq %r8,%rbp // rbp = k0
movq (%r13),%rbx // rbx = b[0]
/* First pass (i == 0): compute a[j] * b[0] + q * n[j]. */
mulq %rbx // (rdx, rax) = a[0] * b[0]
movq %rax,%r15 // r15 = t[0] = lo(b[0] * a[0])
movq (%rcx),%rax // rax = n[0]
imulq %r15,%rbp // rbp = q = t[0] * k0
movq %rdx,%r14 // r14 = hi(a[0] * b[0])
mulq %rbp // (rdx, rax) = q * n[0]
ADD_CARRY %rax,%r15 // r15 = t[0] + lo(q * n[0]); low word cancels.
movq 8(%rsi),%rax // rax = a[1]
movq %rdx,%rdi // rdi = hi(q * n[0])
mulq %rbx // (rdx, rax) = a[1] * b[0]
ADD_CARRY %rax,%r14 // r14 = lo(a[1] * b[0]) + hi(a[0] * b[0])
movq 8(%rcx),%rax // rax = n[1]
movq %rdx,%r15 // r15 = hi(a[1] * b[0])
mulq %rbp // (rdx, rax) = q * n[1]
ADD_CARRY %rax,%rdi // rdi = lo(q * n[1]) + hi(q * n[0])
movq 16(%rsi),%rax // rax = a[2]
ADD_CARRY %r14,%rdi // rdi += hi(a[0] * b[0]) + lo(a[1] * b[0])
movq %rdx,%r12 // r12 = hi(q * n[1])
movq %rdi,(%rsp) // t[0] = rdi
leaq 4(%r10),%r10 // j += 4
.align 16
.Loop1st4x:
mulq %rbx // (rdx, rax) = a[j - 2] * b[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[j - 3] * b[0]) + lo(a[j - 2] * b[0])
movq -16(%rcx,%r10,8),%rax // rax = n[j - 2]
movq %rdx,%r14 // r14 = hi(a[j - 2] * b[0])
mulq %rbp // (rdx, rax) = q * n[j - 2]
ADD_CARRY %rax,%r12 // r12 = hi(q * n[j - 3]) + lo(q * n[j - 2])
movq -8(%rsi,%r10,8),%rax // rax = a[j - 1]
ADD_CARRY %r15,%r12 // r12 += a-side partial sum
movq %r12,-24(%rsp,%r10,8) // t[j - 3] = r12
movq %rdx,%rdi // rdi = hi(q * n[j - 2])
mulq %rbx // (rdx, rax) = a[j - 1] * b[0]
ADD_CARRY %rax,%r14 // r14 += lo(a[j - 1] * b[0])
movq -8(%rcx,%r10,8),%rax // rax = n[j - 1]
movq %rdx,%r15 // r15 = hi(a[j - 1] * b[0])
mulq %rbp // (rdx, rax) = q * n[j - 1]
ADD_CARRY %rax,%rdi // rdi += lo(q * n[j - 1])
movq (%rsi,%r10,8),%rax // rax = a[j]
ADD_CARRY %r14,%rdi // rdi += a-side partial sum
movq %rdi,-16(%rsp,%r10,8) // t[j - 2] = rdi
movq %rdx,%r12 // r12 = hi(q * n[j - 1])
mulq %rbx // (rdx, rax) = a[j] * b[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[j - 1] * b[0]) + lo(a[j] * b[0])
movq (%rcx,%r10,8),%rax // rax = n[j]
movq %rdx,%r14 // r14 = hi(a[j] * b[0])
mulq %rbp // (rdx, rax) = q * n[j]
ADD_CARRY %rax,%r12 // r12 = hi(q * n[j - 1]) + lo(q * n[j])
movq 8(%rsi,%r10,8),%rax // rax = a[j + 1]
ADD_CARRY %r15,%r12 // r12 += a-side partial sum
movq %r12,-8(%rsp,%r10,8) // t[j - 1] = r12
movq %rdx,%rdi // rdi = hi(q * n[j])
mulq %rbx // (rdx, rax) = a[j + 1] * b[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[j] * b[0]) + lo(a[j + 1] * b[0])
movq 8(%rcx,%r10,8),%rax // rax = n[j + 1]
movq %rdx,%r15 // r15 = hi(a[j + 1] * b[0])
mulq %rbp // (rdx, rax) = q * n[j + 1]
ADD_CARRY %rax,%rdi // rdi = hi(q * n[j]) + lo(q * n[j + 1])
movq 16(%rsi,%r10,8),%rax // rax = a[j + 2]
ADD_CARRY %r14,%rdi // rdi += a-side partial sum
movq %rdi,(%rsp,%r10,8) // t[j] = rdi
movq %rdx,%r12 // r12 = hi(q * n[j + 1])
leaq 4(%r10),%r10 // j += 4
cmpq %r9,%r10 // while j < size
jb .Loop1st4x
/* Epilogue of the first pass: the last two words plus the carry word. */
mulq %rbx // (rdx, rax) = a[j - 2] * b[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[j - 3] * b[0]) + lo(a[j - 2] * b[0])
movq -16(%rcx,%r10,8),%rax // rax = n[j - 2]
movq %rdx,%r14 // r14 = hi(a[j - 2] * b[0])
mulq %rbp // (rdx, rax) = q * n[j - 2]
ADD_CARRY %rax,%r12 // r12 = hi(q * n[j - 3]) + lo(q * n[j - 2])
movq -8(%rsi,%r10,8),%rax // rax = a[j - 1]
ADD_CARRY %r15,%r12 // r12 += a-side partial sum
movq %r12,-24(%rsp,%r10,8) // t[j - 3] = r12
movq %rdx,%rdi // rdi = hi(q * n[j - 2])
mulq %rbx // (rdx, rax) = a[j - 1] * b[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[j - 2] * b[0]) + lo(a[j - 1] * b[0])
movq -8(%rcx,%r10,8),%rax // rax = n[j - 1]
movq %rdx,%r15 // r15 = hi(a[j - 1] * b[0])
mulq %rbp // (rdx, rax) = q * n[j - 1]
ADD_CARRY %rax,%rdi // rdi = hi(q * n[j - 2]) + lo(q * n[j - 1])
ADD_CARRY %r14,%rdi // rdi += a-side partial sum
movq %rdi,-16(%rsp,%r10,8) // t[j - 2] = rdi
movq %rdx,%r12 // r12 = hi(q * n[j - 1])
xorq %rdx,%rdx // rdx = 0
ADD_CARRY %r15,%r12 // r12 = hi(a[j - 1] * b[0]) + hi(q * n[j - 1])
movq %r12,-8(%rsp,%r10,8) // t[j - 1] = r12
movq %rdx,(%rsp,%r10,8) // t[j] = CF
leaq 1(%r11),%r11 // i++
.align 4
.LoopOuter4x:
/* Main pass: t += a * b[i] + q * n, one outer iteration per b word. */
movq (%rsi),%rax // rax = a[0]
movq (%r13,%r11,8),%rbx // rbx = b[i]
xorq %r10,%r10 // j = 0
movq (%rsp),%r15 // r15 = t[0]
movq %r8,%rbp // rbp = k0
mulq %rbx // (rdx, rax) = a[0] * b[i]
ADD_CARRY %rax,%r15 // r15 = lo(a[0] * b[i]) + t[0]
movq (%rcx),%rax // rax = n[0]
imulq %r15,%rbp // rbp = q = t[0] * k0
movq %rdx,%r14 // r14 = hi(a[0] * b[i])
mulq %rbp // (rdx, rax) = q * n[0]
ADD_CARRY %rax,%r15 // r15 += lo(q * n[0]); low word cancels.
movq 8(%rsi),%rax // rax = a[1]
movq %rdx,%rdi // rdi = hi(q * n[0])
mulq %rbx // (rdx, rax) = a[1] * b[i]
ADD_CARRY %rax,%r14 // r14 = hi(a[0] * b[i]) + lo(a[1] * b[i])
movq 8(%rcx),%rax // rax = n[1]
ADD_CARRY 8(%rsp),%r14 // r14 += t[1]
movq %rdx,%r15 // r15 = hi(a[1] * b[i])
mulq %rbp // (rdx, rax) = q * n[1]
ADD_CARRY %rax,%rdi // rdi = hi(q * n[0]) + lo(q * n[1])
movq 16(%rsi),%rax // rax = a[2]
ADD_CARRY %r14,%rdi // rdi += r14 (a-side sum + t[1])
leaq 4(%r10),%r10 // j += 4
movq %rdi,(%rsp) // t[0] = rdi
movq %rdx,%r12 // r12 = hi(q * n[1])
.align 16
.Linner4x:
mulq %rbx // (rdx, rax) = a[j - 2] * b[i]
ADD_CARRY %rax,%r15 // r15 += lo(a[j - 2] * b[i])
addq -16(%rsp,%r10,8),%r15 // r15 += t[j - 2]
adcq $0,%rdx
movq -16(%rcx,%r10,8),%rax // rax = n[j - 2]
movq %rdx,%r14 // r14 = hi(a[j - 2] * b[i])
mulq %rbp // (rdx, rax) = q * n[j - 2]
ADD_CARRY %rax,%r12 // r12 += lo(q * n[j - 2])
ADD_CARRY %r15,%r12 // r12 += a[j - 2] * b[i] + t[j - 2]
movq %r12,-24(%rsp,%r10,8) // t[j - 3] = r12
movq %rdx,%rdi // rdi = hi(q * n[j - 2])
movq -8(%rsi,%r10,8),%rax // rax = a[j - 1]
mulq %rbx // (rdx, rax) = a[j - 1] * b[i]
ADD_CARRY %rax,%r14 // r14 += lo(a[j - 1] * b[i])
addq -8(%rsp,%r10,8),%r14 // r14 += t[j - 1]
adcq $0,%rdx
movq %rdx,%r15 // r15 = hi(a[j - 1] * b[i])
movq -8(%rcx,%r10,8),%rax // rax = n[j - 1]
mulq %rbp // (rdx, rax) = q * n[j - 1]
ADD_CARRY %rax,%rdi // rdi += lo(q * n[j - 1])
ADD_CARRY %r14,%rdi // rdi += r14 (a-side sum + t[j - 1])
movq %rdi,-16(%rsp,%r10,8) // t[j - 2] = rdi
movq %rdx,%r12 // r12 = hi(q * n[j - 1])
movq (%rsi,%r10,8),%rax // rax = a[j]
mulq %rbx // (rdx, rax) = a[j] * b[i]
ADD_CARRY %rax,%r15 // r15 += lo(a[j] * b[i])
addq (%rsp,%r10,8),%r15 // r15 += t[j]
adcq $0,%rdx
movq %rdx,%r14 // r14 = hi(a[j] * b[i])
movq (%rcx,%r10,8),%rax // rax = n[j]
mulq %rbp // (rdx, rax) = q * n[j]
ADD_CARRY %rax,%r12 // r12 += lo(q * n[j])
ADD_CARRY %r15,%r12 // r12 += a[j] * b[i] + t[j]
movq %r12,-8(%rsp,%r10,8) // t[j - 1] = r12
movq %rdx,%rdi // rdi = hi(q * n[j])
movq 8(%rsi,%r10,8),%rax // rax = a[j + 1]
mulq %rbx // (rdx, rax) = a[j + 1] * b[i]
ADD_CARRY %rax,%r14 // r14 += lo(a[j + 1] * b[i])
addq 8(%rsp,%r10,8),%r14 // r14 += t[j + 1]
adcq $0,%rdx
movq %rdx,%r15 // r15 = hi(a[j + 1] * b[i])
movq 8(%rcx,%r10,8),%rax // rax = n[j + 1]
mulq %rbp // (rdx, rax) = q * n[j + 1]
ADD_CARRY %rax,%rdi // rdi += lo(q * n[j + 1])
ADD_CARRY %r14,%rdi // rdi += r14 (a-side sum + t[j + 1])
movq %rdi,(%rsp,%r10,8) // t[j] = rdi
movq %rdx,%r12 // r12 = hi(q * n[j + 1])
movq 16(%rsi,%r10,8),%rax // rax = a[j + 2]
leaq 4(%r10),%r10 // j += 4
cmpq %r9,%r10 // while j < size
jb .Linner4x
/* Epilogue of an outer iteration: fold in the previous carry word. */
mulq %rbx // (rdx, rax) = a[j - 2] * b[i]
ADD_CARRY %rax,%r15 // r15 += lo(a[j - 2] * b[i])
addq -16(%rsp,%r10,8),%r15 // r15 += t[j - 2]
adcq $0,%rdx
movq %rdx,%r14 // r14 = hi(a[j - 2] * b[i])
movq -16(%rcx,%r10,8),%rax // rax = n[j - 2]
mulq %rbp // (rdx, rax) = q * n[j - 2]
ADD_CARRY %rax,%r12 // r12 += lo(q * n[j - 2])
ADD_CARRY %r15,%r12 // r12 += t[j - 2] contribution
movq %r12,-24(%rsp,%r10,8) // t[j - 3] = r12
movq %rdx,%rdi // rdi = hi(q * n[j - 2])
movq -8(%rsi,%r10,8),%rax // rax = a[j - 1]
mulq %rbx // (rdx, rax) = a[j - 1] * b[i]
ADD_CARRY %rax,%r14 // r14 += lo(a[j - 1] * b[i])
addq -8(%rsp,%r10,8),%r14 // r14 += t[j - 1]
adcq $0,%rdx
leaq 1(%r11),%r11 // i++
movq %rdx,%r15 // r15 = hi(a[j - 1] * b[i])
movq -8(%rcx,%r10,8),%rax // rax = n[j - 1]
mulq %rbp // (rdx, rax) = q * n[j - 1]
ADD_CARRY %rax,%rdi // rdi += lo(q * n[j - 1])
ADD_CARRY %r14,%rdi // rdi += r14
movq %rdi,-16(%rsp,%r10,8) // t[j - 2] = rdi
movq %rdx,%r12 // r12 = hi(q * n[j - 1])
xorq %rdx,%rdx // rdx = 0
ADD_CARRY %r15,%r12 // r12 += hi(a[j - 1] * b[i])
addq (%rsp,%r9,8),%r12 // r12 += t[size] (carry word)
adcq $0,%rdx
movq %r12,-8(%rsp,%r10,8) // t[j - 1] = r12
movq %rdx,(%rsp,%r10,8) // t[j] = CF
cmpq %r9,%r11 // while i < size
jb .LoopOuter4x
movq 16(%rsp,%r9,8),%rdi // rdi = r (saved at t[size + 2]).
leaq -4(%r9),%r10 // j = size - 4
movq (%rsp),%rax // rax = t[0]
movq 8(%rsp),%rdx // rdx = t[1]
shrq $2,%r10 // r10 = (size - 4) / 4
leaq (%rsp),%rsi // rsi = &t[0]
xorq %r11,%r11 // i = 0
subq (%rcx),%rax // rax = t[0] - n[0]
sbbq 8(%rcx),%rdx // rdx = t[1] - n[1] - borrow
movq 16(%rsi),%rbx // rbx = t[2]
movq 24(%rsi),%rbp // rbp = t[3]
/* Compute S - N, four words per iteration, with a rolling borrow. */
.LoopSub4x:
movq %rax,(%rdi,%r11,8) // r[i] = t[i] - n[i]
movq %rdx,8(%rdi,%r11,8) // r[i + 1] = t[i + 1] - n[i + 1]
movq 32(%rsi,%r11,8),%rax // rax = t[i + 4]
movq 40(%rsi,%r11,8),%rdx // rdx = t[i + 5]
sbbq 16(%rcx,%r11,8),%rbx // rbx = t[i + 2] - n[i + 2] - borrow
sbbq 24(%rcx,%r11,8),%rbp // rbp = t[i + 3] - n[i + 3] - borrow
sbbq 32(%rcx,%r11,8),%rax // rax = t[i + 4] - n[i + 4] - borrow
sbbq 40(%rcx,%r11,8),%rdx // rdx = t[i + 5] - n[i + 5] - borrow
movq %rbx,16(%rdi,%r11,8) // r[i + 2] = rbx
movq %rbp,24(%rdi,%r11,8) // r[i + 3] = rbp
movq 48(%rsi,%r11,8),%rbx // rbx = t[i + 6]
movq 56(%rsi,%r11,8),%rbp // rbp = t[i + 7]
leaq 4(%r11),%r11 // i += 4
decq %r10 // j--
jnz .LoopSub4x // while j != 0
sbbq 16(%rcx,%r11,8),%rbx // Final four words of the subtraction.
sbbq 24(%rcx,%r11,8),%rbp
movq %rax,(%rdi,%r11,8) // r[i] = rax
movq %rdx,8(%rdi,%r11,8) // r[i + 1] = rdx
movq %rbx,16(%rdi,%r11,8) // r[i + 2] = rbx
movq %rbp,24(%rdi,%r11,8) // r[i + 3] = rbp
movq 32(%rsi,%r11,8),%rax // rax = t[size] (carry word)
sbbq $0,%rax // rax -= final borrow; rax = select mask source.
pxor %xmm2,%xmm2 // xmm2 = 0 (used to scrub t[]).
movq %rax, %xmm0
pcmpeqd %xmm1,%xmm1 // xmm1 = all-ones
pshufd $0,%xmm0,%xmm0 // Broadcast mask into xmm0.
movq %r9,%r10 // j = size
pxor %xmm0,%xmm1 // xmm1 = ~mask
shrq $2,%r10 // j = size / 4
xorl %eax,%eax // i = 0
.align 16
.LoopCopy4x: // Constant-time select t vs t - n into r; wipe t[].
movdqa (%rsp,%rax),%xmm5
movdqu (%rdi,%rax),%xmm3
pand %xmm0,%xmm5 // keep t where mask set
pand %xmm1,%xmm3 // keep t - n where mask clear
movdqa 16(%rsp,%rax),%xmm4
movdqa %xmm2,(%rsp,%rax) // scrub t
por %xmm3,%xmm5
movdqu 16(%rdi,%rax),%xmm3
movdqu %xmm5,(%rdi,%rax)
pand %xmm0,%xmm4
pand %xmm1,%xmm3
movdqa %xmm2,16(%rsp,%rax) // scrub t
por %xmm3,%xmm4
movdqu %xmm4,16(%rdi,%rax)
leaq 32(%rax),%rax
decq %r10 // j--
jnz .LoopCopy4x
movq 8(%rsp,%r9,8),%rsi // rsi = previously saved rsp.
movq $1,%rax // Return value 1.
leaq (%rsi),%rsp // Restore rsp.
RESTORE_REGISTERS
ret
.cfi_endproc
.size MontMul4,.-MontMul4
/*
 * 8x Montgomery squaring kernel wrapper: allocates the stack frame,
 * delegates the square + reduce work to MontSqr8Inner, then performs the
 * conditional final subtraction (t - n) and a constant-time copy of the
 * result into r, wiping the temporary buffer along the way. Requires
 * a == b and size % 8 == 0. Returns 1 in %rax.
 */
.type MontSqr8,@function
.align 32
MontSqr8:
.cfi_startproc
SAVE_REGISTERS
movq %rsp,%rax
movl %r9d,%r15d
shll $3,%r9d // Calculate size * 8 bytes.
shlq $5,%r15 // size * 8 * 4
negq %r9
leaq -64(%rsp,%r9,2),%r14 // r14 = rsp - 2 * size * 8 - 64
subq %rsi,%r14
andq $4095,%r14
movq %rsp,%rbp
cmpq %r14,%r15
jae .Loop8xCheckstk
leaq 4032(,%r9,2),%r15 // r15 = 4096 - frame - 2 * size
subq %r15,%r14
movq $0,%r15
cmovcq %r15,%r14
.Loop8xCheckstk:
subq %r14,%rbp
leaq -64(%rbp,%r9,2),%rbp // Allocate a frame of 64 + 2 * size * 8 bytes.
andq $-64,%rbp // __chkstk-style probing below, needed when the
 // stack grows by more than one page at once.
movq %rsp,%r14
subq %rbp,%r14
andq $-4096,%r14
leaq (%r14,%rbp),%rsp
cmpq %rbp,%rsp
jbe .LoopMul8x
.align 16
.LoopPage8x:
leaq -4096(%rsp),%rsp // Touch one page at a time until rsp <= target.
cmpq %rbp,%rsp
ja .LoopPage8x
.LoopMul8x:
movq %r9,%r15 // r15 = -size * 8
negq %r9 // Restore size (in bytes).
movq %r8,32(%rsp) // Stash k0 and the original rsp in the frame.
movq %rax,40(%rsp)
movq %rcx, %xmm1 // Stash the n pointer in xmm1.
pxor %xmm2,%xmm2 // xmm2 = 0
movq %rdi, %xmm0 // Stash the r pointer in xmm0.
movq %r15, %xmm5 // Stash -size.
call MontSqr8Inner
leaq (%rdi,%r9),%rbx // rbx = t[size]
movq %r9,%rcx // rcx = -size (after negq below via sarq)
movq %r9,%rdx // rdx = -size
movq %xmm0, %rdi // rdi = r
sarq $5,%rcx // rcx = loop count (negative, 4 words per step).
.align 32
/* Compute T - N with a rolling borrow, four words per iteration. */
.LoopSub8x:
movq (%rbx),%r13 // r13 = t[i]
movq 8(%rbx),%r12 // r12 = t[i + 1]
movq 16(%rbx),%r11 // r11 = t[i + 2]
movq 24(%rbx),%r10 // r10 = t[i + 3]
sbbq (%rbp),%r13 // r13 = t[i] - n[i] - borrow
sbbq 8(%rbp),%r12 // r12 = t[i + 1] - n[i + 1] - borrow
sbbq 16(%rbp),%r11 // r11 = t[i + 2] - n[i + 2] - borrow
sbbq 24(%rbp),%r10 // r10 = t[i + 3] - n[i + 3] - borrow
movq %r13,0(%rdi) // Store the difference into r.
movq %r12,8(%rdi)
movq %r11,16(%rdi)
movq %r10,24(%rdi)
leaq 32(%rbp),%rbp // n += 4
leaq 32(%rdi),%rdi // r += 4
leaq 32(%rbx),%rbx // t += 4
incq %rcx
jnz .LoopSub8x
sbbq $0,%rax // rax -= final borrow; becomes the select mask.
leaq (%rbx,%r9),%rbx // Rewind t to its start.
leaq (%rdi,%r9),%rdi // Rewind r to its start.
movq %rax,%xmm0
pxor %xmm2,%xmm2
pshufd $0,%xmm0,%xmm0 // Broadcast mask into xmm0.
movq 40(%rsp),%rsi // rsi = previously saved rsp.
.align 32
.LoopCopy8x: // Constant-time select t vs t - n into r; wipe t[].
movdqa 0(%rbx),%xmm1
movdqa 16(%rbx),%xmm5
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm3
movdqu 16(%rdi),%xmm4
leaq 32(%rdi),%rdi
movdqa %xmm2,-32(%rbx) // scrub t
movdqa %xmm2,-16(%rbx)
movdqa %xmm2,-32(%rbx,%rdx) // scrub the upper half of t
movdqa %xmm2,-16(%rbx,%rdx)
pcmpeqd %xmm0,%xmm2 // xmm2 = ~mask (equal-to-mask compare)
pand %xmm0,%xmm1 // keep t where mask set
pand %xmm0,%xmm5
pand %xmm2,%xmm3 // keep t - n where mask clear
pand %xmm2,%xmm4
pxor %xmm2,%xmm2 // xmm2 back to 0
por %xmm1,%xmm3
por %xmm5,%xmm4
movdqu %xmm3,-32(%rdi)
movdqu %xmm4,-16(%rdi)
addq $32,%r9
jnz .LoopCopy8x
movq $1,%rax // Return value 1.
leaq (%rsi),%rsp // Restore rsp.
RESTORE_REGISTERS // Restore non-volatile registers.
ret
.cfi_endproc
.size MontSqr8,.-MontSqr8
.type MontSqr8Inner,@function
.align 32
MontSqr8Inner:
.cfi_startproc
leaq 32(%r15),%rbp // i = -size + 32
leaq (%rsi,%r9),%rsi // rsi = a[size]
movq %r9,%rcx // j = size
movq -32(%rsi,%rbp),%r11 // r11 = a[0]
movq -24(%rsi,%rbp),%r10 // r10 = a[1]
leaq 56(%rsp,%r9,2),%rdi // rdi = t[2 * size]
leaq -16(%rsi,%rbp),%rbx // rbx = a[2]
leaq -32(%rdi,%rbp),%rdi // rdi = t[2 * size - i]
movq %r10,%rax // rax = a[1]
mulq %r11 // (rdx, rax) = a[1] * a[0]
movq %rax,%r15 // r15 = lo(a[1] * a[0])
movq %rdx,%r14 // r14 = hi(a[1] * a[0])
movq %r15,-24(%rdi,%rbp) // t[1] = lo(a[1] * a[0])
movq (%rbx),%rax // rax = a[2]
mulq %r11 // (rdx, rax) = a[2] * a[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[1] * a[0]) + lo(a[2] * a[0])
movq %r14,-16(%rdi,%rbp) // t[2] = hi(a[1] * a[0]) + lo(a[2] * a[0])
movq %rdx,%r15 // r15 = hi(a[2] * a[0])
movq (%rbx),%rax // rax = a[2]
mulq %r10 // (rdx, rax) = a[2] * a[1]
movq %rax,%r13 // r13 = lo(a[2] * a[1])
movq %rdx,%r12 // r12 = hi(a[2] * a[1])
leaq 8(%rbx),%rbx // rbx = a[3]
movq (%rbx),%rax // rax = a[3]
mulq %r11 // (rdx, rax) = a[3] * a[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[2] * a[0]) + lo(a[3] * a[0])
ADD_CARRY %r13,%r15 // r15 = hi(a[2] * a[0]) + lo(a[3] * a[0]) + lo(a[2] * a[1])
movq %rdx,%r14 // r14 = hi(a[3] * a[0])
movq (%rbx),%rax // rax = a[3]
leaq (%rbp),%rcx // j = i
movq %r15,-8(%rdi,%rcx) // t[3] = hi(a[2] * a[0]) + lo(a[3] * a[0]) + lo(a[2] * a[1])
.align 32
.Loop1stSqr4x:
leaq (%rsi,%rcx),%rbx // rbx = a[4]
mulq %r10 // (rdx, rax) = a[3] * a[1]
ADD_CARRY %rax,%r12 // r12 = hi(a[2] * a[1]) + lo(a[3] * a[1])
movq %rdx,%r13 // r13 = hi(a[3] * a[1])
movq (%rbx),%rax // rax = a[4]
mulq %r11 // (rdx, rax) = a[4] * a[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[3] * a[0]) + lo(a[4] * a[0])
ADD_CARRY %r12,%r14 // r14 = hi(a[3] * a[0]) + lo(a[4] * a[0]) + lo(a[3] * a[1])
movq (%rbx),%rax // rax = a[4]
movq %rdx,%r15 // r15 = hi(a[4] * a[0])
mulq %r10 // (rdx, rax) = a[4] * a[1]
ADD_CARRY %rax,%r13 // r13 = hi(a[3] * a[1]) + lo(a[4] * a[1])
movq %r14,(%rdi,%rcx) // t[4] = hi(a[3] * a[0]) + lo(a[4] * a[0]) + lo(a[3] * a[1])
movq %rdx,%r12 // r12 = hi(a[4] * a[1])
leaq 8(%rbx),%rbx // rbx = a[5]
movq (%rbx),%rax // rax = a[5]
mulq %r11 // (rdx, rax) = a[5] * a[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[4] * a[0]) + lo(a[5] * a[0])
ADD_CARRY %r13,%r15 // r15 = hi(a[4] * a[0]) + lo(a[5] * a[0]) + hi(a[3] * a[1]) + lo(a[4] * a[1])
movq (%rbx),%rax // rax = a[5]
movq %rdx,%r14 // r14 = hi(a[5] * a[0])
mulq %r10 // (rdx, rax) = a[5] * a[1]
ADD_CARRY %rax,%r12 // r12 = hi(a[4] * a[1]) + lo(a[5] * a[1])
movq %r15,8(%rdi,%rcx) // t[5] = r10
movq %rdx,%r13 // r13 = hi(a[5] * a[1])
leaq 8(%rbx),%rbx // rbx = a[6]
movq (%rbx),%rax // rax = a[6]
mulq %r11 // (rdx, rax) = a[6] * a[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[5] * a[0]) + lo(a[6] * a[0])
ADD_CARRY %r12,%r14 // r14 = hi(a[5] * a[0]) + lo(a[6] * a[0]) + hi(a[4] * a[1]) + lo(a[5] * a[1])
movq (%rbx),%rax // rax = a[6]
movq %rdx,%r15 // r15 = hi(a[6] * a[0])
mulq %r10 // (rdx, rax) = a[6] * a[1]
ADD_CARRY %rax,%r13 // r13 = lo(a[6] * a[1])
movq %r14,16(%rdi,%rcx) // t[6] = r11
movq %rdx,%r12 // r12 = hi(a[6] * a[1])
leaq 8(%rbx),%rbx // rbx = a[7]
movq (%rbx),%rax // rax = a[7]
mulq %r11 // (rdx, rax) = a[7] * a[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[6] * a[0]) + lo(a[7] * a[0])
ADD_CARRY %r13,%r15 // r15 = hi(a[6] * a[0]) + lo(a[7] * a[0]) + lo(a[6] * a[1])
movq %r15,24(%rdi,%rcx) // t[7] = hi(a[6] * a[0]) + lo(a[7] * a[0]) + lo(a[6] * a[1])
movq %rdx,%r14 // r14 = hi(a[7] * a[0])
movq (%rbx),%rax // rax = a[7]
leaq 32(%rcx),%rcx // j += 2
cmpq $0,%rcx // if j != 0
jne .Loop1stSqr4x
mulq %r10 // (rdx, rax) = a[7] * a[1]
ADD_CARRY %rax,%r12 // r12 = hi(a[6] * a[1]) + lo(a[7] * a[1])
leaq 16(%rbp),%rbp // i++
ADD_CARRY %r14,%r12 // r12 = hi(a[6] * a[1]) + hi(a[7] * a[0]) + lo(a[7] * a[1])
movq %r12,(%rdi) // t[8] = r13
movq %rdx,%r13 // r13 = hi(a[7] * a[1])
movq %rdx,8(%rdi) // t[9] = hi(a[7] * a[1])
.align 32
.LoopOuterSqr4x:
movq -32(%rsi,%rbp),%r11 // r11 = a[0]
movq -24(%rsi,%rbp),%r10 // r10 = a[1]
leaq -16(%rsi,%rbp),%rbx // rbx = a[2]
leaq 56(%rsp,%r9,2),%rdi // rdi = t[size * 2 - i]
leaq -32(%rdi,%rbp),%rdi
movq %r10,%rax // rax = a[1]
mulq %r11 // (rdx, rax) = a[1] * a[0]
movq -24(%rdi,%rbp),%r15 // r15 = t[1]
ADD_CARRY %rax,%r15 // r15 = lo(a[1] * a[0]) + t[1]
movq %r15,-24(%rdi,%rbp) // t[1] = r10
movq %rdx,%r14 // r14 = hi(a[1] * a[0])
movq (%rbx),%rax // rax = a[2]
mulq %r11 // (rdx, rax) = a[2] * a[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[1] * a[0]) + lo(a[2] * a[0])
addq -16(%rdi,%rbp),%r14 // r14 = hi(a[1] * a[0]) + lo(a[2] * a[0]) + t[2]
adcq $0,%rdx // r10 += CF
movq %rdx,%r15 // r10 = hi(a[2] * a[0])
movq %r14,-16(%rdi,%rbp) // t[2] = r11
xorq %r13,%r13 // Clear CF.
movq (%rbx),%rax // rax = a[2]
mulq %r10 // (rdx, rax) = a[2] * a[1]
ADD_CARRY %rax,%r13 // r13 = lo(a[2] * a[1])
addq -8(%rdi,%rbp),%r13 // r13 = lo(a[2] * a[1]) + t[3]
adcq $0,%rdx
movq %rdx,%r12 // r12 = hi(a[2] * a[1])
leaq 8(%rbx),%rbx // rbx = a[3]
movq (%rbx),%rax // rax = a[3]
mulq %r11 // (rdx, rax) = a[3] * a[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[2] * a[0]) + lo(a[3] * a[0])
ADD_CARRY %r13,%r15 // r15 = hi(a[2] * a[0]) + lo(a[3] * a[0]) + lo(a[2] * a[1]) + t[3]
movq (%rbx),%rax // rax = a[3]
leaq (%rbp),%rcx // j = i
movq %r15,-8(%rdi,%rbp) // t[3] = r10
movq %rdx,%r14 // r14 = hi(a[3] * a[0])
.align 32
.LoopInnerSqr4x:
leaq (%rsi,%rcx),%rbx // rbx = a[4]
mulq %r10 // (rdx, rax) = a[3] * a[1]
ADD_CARRY %rax,%r12 // r12 = hi(a[2] * a[1]) + lo(a[3] * a[1])
movq (%rbx),%rax // rax = a[4]
movq %rdx,%r13 // r13 = hi(a[3] * a[1])
addq (%rdi,%rcx),%r12 // r12 = hi(a[2] * a[1]) + lo(a[3] * a[1]) + t[4]
adcq $0,%rdx // r13 += CF
movq %rdx,%r13 // r13 = hi(a[3] * a[1])
mulq %r11 // (rdx, rax) = a[4] * a[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[3] * a[0]) + lo(a[4] * a[0])
ADD_CARRY %r12,%r14 // r14 = hi(a[3] * a[0]) + lo(a[4] * a[0]) + hi(a[2] * a[1]) + lo(a[3] * a[1]) + t[4]
movq %r14,(%rdi,%rcx) // t[4] = r11
movq %rdx,%r15 // r15 = hi(a[4] * a[0])
movq (%rbx),%rax // rax = a[4]
mulq %r10 // (rdx, rax) = a[4] * a[1]
ADD_CARRY %rax,%r13 // r13 = hi(a[3] * a[1]) + lo(a[4] * a[1])
addq 8(%rdi,%rcx),%r13 // r13 = hi(a[3] * a[1]) + lo(a[4] * a[1]) + t[5]
adcq $0,%rdx // r12 += CF
leaq 8(%rbx),%rbx // rbx = a[5]
movq (%rbx),%rax // rax = a[5]
movq %rdx,%r12 // r12 = hi(a[4] * a[1])
mulq %r11 // (rdx, rax) = a[5] * a[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[4] * a[0]) + lo(a[5] * a[0])
ADD_CARRY %r13,%r15 // r15 = hi(a[4] * a[0]) + lo(a[5] * a[0]) + hi(a[3] * a[1]) + lo(a[4] * a[1]) + t[5]
movq %r15,8(%rdi,%rcx) // t[5] = r10
movq %rdx,%r14 // r14 = hi(a[5] * a[0])
movq (%rbx),%rax // rax = a[5]
leaq 16(%rcx),%rcx // j++
cmpq $0,%rcx // if j != 0
jne .LoopInnerSqr4x
mulq %r10 // (rdx, rax) = a[5] * a[1]
ADD_CARRY %rax,%r12 // r12 = hi(a[4] * a[1]) + lo(a[5] * a[1])
ADD_CARRY %r14,%r12 // r12 = hi(a[4] * a[1]) + lo(a[5] * a[1]) + hi(a[5] * a[0])
movq %r12,(%rdi) // t[6] = r13
movq %rdx,%r13 // r13 = hi(a[5] * a[1])
movq %rdx,8(%rdi) // t[7] = hi(a[5] * a[1])
addq $16,%rbp // i++
jnz .LoopOuterSqr4x // if i != 0
movq -32(%rsi),%r11 // r11 = a[0]
leaq 56(%rsp,%r9,2),%rdi // rdi = t[2 * size]
movq -24(%rsi),%rax // rax = a[1]
leaq -32(%rdi,%rbp),%rdi // rdi = t[2 * size - i]
movq -16(%rsi),%rbx // rbx = a[2]
movq %rax,%r10 // r10 = a[1]
mulq %r11 // (rdx, rax) = a[1] * a[0]
ADD_CARRY %rax,%r15 // r15 = lo(a[1] * a[0]) + t[1]
movq %rbx,%rax // rax = a[2]
movq %rdx,%r14 // r14 = hi(a[1] * a[0])
mulq %r11 // (rdx, rax) = a[2] * a[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[1] * a[0]) + lo(a[2] * a[0])
movq %r15,-24(%rdi) // t[1] = r10
ADD_CARRY %r12,%r14 // r14 = lo(a[2] * a[0]) + t[2]
movq %rbx,%rax // rax = a[2]
movq %rdx,%r15 // r15 = hi(a[2] * a[0])
mulq %r10 // (rdx, rax) = a[2] * a[1]
ADD_CARRY %rax,%r13 // r13 = lo(a[2] * a[1]) + t[3]
movq %r14,-16(%rdi) // t[2] = r11
movq %rdx,%r12 // r12 = hi(a[2] * a[1])
movq -8(%rsi),%rbx // rbx = a[3]
movq %rbx,%rax // rax = a[3]
mulq %r11 // (rdx, rax) = a[3] * a[0]
ADD_CARRY %rax,%r15 // r15 = hi(a[2] * a[0]) + lo(a[3] * a[0])
ADD_CARRY %r13,%r15 // r15 = hi(a[2] * a[0]) + lo(a[3] * a[0]) + lo(a[2] * a[1]) + t[3]
movq %rbx,%rax // rax = a[3]
movq %r15,-8(%rdi) // t[3] = r10
movq %rdx,%r14 // r14 = hi(a[3] * a[0])
mulq %r10 // (rdx, rax) = a[3] * a[1]
ADD_CARRY %rax,%r12 // r12 = hi(a[2] * a[1]) + lo(a[3] * a[1])
ADD_CARRY %r14,%r12 // r12 = hi(a[3] * a[0]) + hi(a[2] * a[1]) + lo(a[3] * a[1])
movq %r12,(%rdi) // t[4] = r13
movq %rdx,%r13 // r13 = hi(a[3] * a[1])
movq %rdx,8(%rdi) // t[5] = hi(a[3] * a[1])
movq -16(%rsi),%rax // rax = a[2]
mulq %rbx // (rdx, rax) = a[3] * a[2]
addq $16,%rbp
xorq %r11,%r11
subq %r9,%rbp // i = 16 - size
xorq %r10,%r10
ADD_CARRY %r13,%rax // rax = hi(a[3] * a[1]) + lo(a[3] * a[2])
movq %rax,8(%rdi) // t[5] = hi(a[3] * a[1]) + lo(a[3] * a[2])
movq %rdx,16(%rdi) // t[6] = hi(a[3] * a[2])
movq %r10,24(%rdi) // t[7] = 0
movq -16(%rsi,%rbp),%rax // rax = a[0]
leaq 56(%rsp),%rdi // rdi = t[0]
xorq %r15,%r15
movq 8(%rdi),%r14 // r14 = t[1]
leaq (%r11,%r15,2),%r13
shrq $63,%r15 // Cyclically shifts 63 bits to the right to obtain the lower bits.
leaq (%rcx,%r14,2),%r12 // r12 = t[1] * 2
shrq $63,%r14 // r14 = t[1] >> 63
orq %r15,%r12 // r12 = t[1] * 2
movq 16(%rdi),%r15 // r15 = t[2]
movq %r14,%r11 // r11 = t[1] >> 63
mulq %rax // (rdx, rax) = a[0] * a[0]
negq %r10 // If the value is not 0, CF is set to 1.
adcq %rax,%r13 // r13 = lo(a[0] * a[0])
movq 24(%rdi),%r14 // r14 = t[3]
movq %r13,(%rdi) // t[0] = 0
adcq %rdx,%r12 // r12 = t[1] * 2 + hi(a[0] * a[0])
leaq (%r11,%r15,2),%rbx // rbx = t[2] * 2 + t[1] >> 63
movq -8(%rsi,%rbp),%rax // rax = a[1]
movq %r12,8(%rdi) // t[1] = t[1] * 2 + hi(a[0] * a[0])
sbbq %r10,%r10 // r10 = -CF clear CF
shrq $63,%r15 // r15 = t[2] >> 63
leaq (%rcx,%r14,2),%r8 // r8 = t[3] * 2
shrq $63,%r14 // r14 = t[3] >> 63
orq %r15,%r8 // r8 = (t[3] * 2) + (t[2] >> 63)
movq 32(%rdi),%r15 // r15 = t[4]
movq %r14,%r11 // r11 = t[3] >> 63
mulq %rax // (rdx, rax) = a[1] * a[1]
negq %r10 // If the value is not 0, CF is set to 1.
movq 40(%rdi),%r14 // r14 = t[5]
adcq %rax,%rbx // rbx = t[2] * 2 + t[1] >> 63 + lo(a[1] * a[1])
movq (%rsi,%rbp),%rax // rax = a[2]
movq %rbx,16(%rdi) // t[2] = t[2] * 2 + t[1] >> 63 + lo(a[1] * a[1])
adcq %rdx,%r8 // r8 = t[3] * 2 + t[2] >> 63 + hi(a[1] * a[1])
leaq 16(%rbp),%rbp // i++
movq %r8,24(%rdi) // t[3] = r8
sbbq %r10,%r10 // r10 = -CF clear CF.
leaq 64(%rdi),%rdi // t += 64
.align 32
.LoopShiftAddSqr4x:
leaq (%r11,%r15,2),%r13 // r13 = t[4] * 2 + t[3] >> 63
shrq $63,%r15 // r15 = t[4] >> 63
leaq (%rcx,%r14,2),%r12 // r12 = t[5] * 2
shrq $63,%r14 // r14 = t[5] >> 63
orq %r15,%r12 // r12 = (t[5] * 2) + t[4] >> 63
movq -16(%rdi),%r15 // r15 = t[6]
movq %r14,%r11 // r11 = t[5] >> 63
mulq %rax // (rdx, rax) = a[2] * a[2]
negq %r10 // r10 = CF
movq -8(%rdi),%r14 // r14 = t[7]
adcq %rax,%r13 // r13 = t[4] * 2 + t[3] >> 63 + lo(a[2] * a[2])
movq %r13,-32(%rdi) // t[4] = r12
adcq %rdx,%r12 // r12 = (t[5] * 2) + t[4] >> 63 + hi(a[2] * a[2])
leaq (%r11,%r15,2),%rbx // rbx = t[6] * 2 + t[5] >> 63
movq -8(%rsi,%rbp),%rax // rax = a[3]
movq %r12,-24(%rdi) // t[5] = hi(a[2] * a[2])
sbbq %r10,%r10 // r10 = -CF
shrq $63,%r15 // r15 = t[6] >> 63
leaq (%rcx,%r14,2),%r8 // r8 = t[7] * 2
shrq $63,%r14 // r14 = t[7] >> 63
orq %r15,%r8 // r8 = t[7] * 2 + t[6] >> 63
movq 0(%rdi),%r15 // r15 = t[8]
movq %r14,%r11 // r11 = t[7] >> 63
mulq %rax // (rdx, rax) = a[3] * a[3]
negq %r10 // r10 = CF
movq 8(%rdi),%r14 // r14 = t[9]
adcq %rax,%rbx // rbx = t[6] * 2 + t[5] >> 63 + lo(a[3] * a[3])
movq %rbx,-16(%rdi) // t[6] = rbx
adcq %rdx,%r8 // r8 = t[7] * 2 + t[6] >> 63 + hi(a[3] * a[3])
leaq (%r11,%r15,2),%r13 // r13 = t[8] * 2 + t[7] >> 63
movq %r8,-8(%rdi) // t[7] = hi(a[3] * a[3])
movq (%rsi,%rbp),%rax // rax = a[4]
sbbq %r10,%r10 // r10 = -CF
shrq $63,%r15 // r15 = t[8] >> 63
leaq (%rcx,%r14,2),%r12 // r12 = t[9] * 2
shrq $63,%r14 // r14 = t[9] >> 63
orq %r15,%r12 // r12 = t[9] * 2 + t[8] >> 63
movq 16(%rdi),%r15 // r15 = t[10]
movq %r14,%r11 // r11 = t[9] >> 63
mulq %rax // (rdx, rax) = a[4] * a[4]
negq %r10 // r10 = -CF
movq 24(%rdi),%r14 // r14 = t[11]
adcq %rax,%r13 // r13 = t[8] * 2 + t[7] >> 63 + lo(a[4] * a[4])
movq %r13,(%rdi) // t[8] = r12
adcq %rdx,%r12 // r12 = t[9] * 2 + t[8] >> 63 + hi(a[4] * a[4])
leaq (%r11,%r15,2),%rbx // rbx = t[10] * 2 + t[9] >> 63
movq 8(%rsi,%rbp),%rax // rax = a[5]
movq %r12,8(%rdi) // t[9] = r13
sbbq %r10,%r10 // r10 = -CF
shrq $63,%r15 // r15 = t[10] >> 63
leaq (%rcx,%r14,2),%r8 // r8 = t[11] * 2
shrq $63,%r14 // r14 = t[11] >> 63
orq %r15,%r8 // r8 = t[11] * 2 + t[10] >> 63
movq 32(%rdi),%r15 // r15 = t[12]
movq %r14,%r11 // r11 = t[11] >> 63
mulq %rax // (rdx, rax) = a[5] * a[5]
negq %r10 // r10 = CF
movq 40(%rdi),%r14 // r14 = t[13]
adcq %rax,%rbx // rbx = t[10] * 2 + t[9] >> 63 + lo(a[5] * a[5])
movq 16(%rsi,%rbp),%rax // rax = a[6]
movq %rbx,16(%rdi) // t[10] = rbx
adcq %rdx,%r8 // r8 = t[11] * 2 + t[10] >> 63 + hi(a[5] * a[5])
movq %r8,24(%rdi) // t[11] = r8
sbbq %r10,%r10 // r10 = -CF
leaq 64(%rdi),%rdi // t += 64
addq $32,%rbp // i += 4
jnz .LoopShiftAddSqr4x // if i != 0
leaq (%r11,%r15,2),%r13 // r13 = t[12] * 2 + t[11] >> 63
shrq $63,%r15 // r15 = t[12] >> 63
leaq (%rcx,%r14,2),%r12 // r12 = t[13] * 2
shrq $63,%r14 // r14 = t[13] >> 63
orq %r15,%r12 // r12 = t[13] * 2 + t[12] >> 63
movq -16(%rdi),%r15 // r15 = t[14]
movq %r14,%r11 // r11 = t[13] >> 63
mulq %rax // (rdx, rax) = a[6] * a[6]
negq %r10 // r10 = CF
movq -8(%rdi),%r14 // r14 = t[15]
adcq %rax,%r13 // r13 = t[12] * 2 + t[11] >> 63 + lo(a[6] * a[6])
movq %r13,-32(%rdi) // t[12] = r12
adcq %rdx,%r12 // r12 = t[13] * 2 + t[12] >> 63 + hi(a[6] * a[6])
leaq (%r11,%r15,2),%rbx // rbx = t[14] * 2 + t[13] >> 63
movq -8(%rsi),%rax // rax = a[7]
movq %r12,-24(%rdi) // t[13] = r13
sbbq %r10,%r10 // r10 = -CF
shrq $63,%r15 // r15 = t[14] >> 63
leaq (%rcx,%r14,2),%r8 // r8 = t[15] * 2
shrq $63,%r14 // r14 = t[15] >> 63
orq %r15,%r8 // r8 = t[15] * 2 + t[14] >> 63
mulq %rax // (rdx, rax) = a[7] * a[7]
negq %r10 // r10 = CF
adcq %rax,%rbx // rbx = t[14] * 2 + t[13] >> 63 + lo(a[7] * a[7])
adcq %rdx,%r8 // r8 = t[15] * 2 + t[14] >> 63 + hi(a[7] * a[7])
movq %rbx,-16(%rdi) // t[14] = rbx
movq %r8,-8(%rdi) // t[15] = r8
movq %xmm1,%rbp // rbp = n
xorq %rax,%rax // rax = 0
leaq (%r9,%rbp),%rcx // rcx = n[size]
leaq 56(%rsp,%r9,2),%rdx // rdx = t[size * 2]
movq %rcx,8(%rsp)
leaq 56(%rsp,%r9),%rdi
movq %rdx,16(%rsp)
negq %r9
.align 32
.LoopReduceSqr8x:
leaq (%rdi,%r9),%rdi // rdi = t[]
movq (%rdi),%rbx // rbx = t[0]
movq 8(%rdi),%r9 // r9 = t[1]
movq 16(%rdi),%r15 // r15 = t[2]
movq 24(%rdi),%r14 // r14 = t[3]
movq 32(%rdi),%r13 // r13 = t[4]
movq 40(%rdi),%r12 // r12 = t[5]
movq 48(%rdi),%r11 // r11 = t[6]
movq 56(%rdi),%r10 // r10 = t[7]
movq %rax,(%rdx) // Store the highest carry bit.
leaq 64(%rdi),%rdi // rdi = t[8]
movq %rbx,%r8 // r8 = t[0]
imulq 40(%rsp),%rbx // rbx = k0 * t[0]
movl $8,%ecx
.align 32
.LoopReduce8x:
movq (%rbp),%rax // rax = n[0]
mulq %rbx // (rdx, rax) = t[0] * k0 * n[0]
negq %r8 // r8 = -t[0], If t[0] is not 0, set CF to 1.
movq %rdx,%r8 // r8 = hi(t[0] * k0 * n[0])
adcq $0,%r8 // r8 += CF
movq 8(%rbp),%rax // rax = n[1]
mulq %rbx // (rdx, rax) = t[0] * k0 * n[1]
movq %rbx,48(%rsp,%rcx,8)
ADD_CARRY %rax,%r9 // r9 = t[1] + lo(t[0] * k0 * n[1])
ADD_CARRY %r9,%r8 // r8 = hi(t[0] * k0 * n[0]) + lo(t[0] * k0 * n[1]) + t[1]
movq 16(%rbp),%rax // rax = n[2]
movq %rdx,%r9 // r9 = hi(t[0] * k0 * n[1])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[2]
ADD_CARRY %rax,%r15 // r15 = lo(t[0] * k0 * n[2]) + t[2]
ADD_CARRY %r15,%r9 // r9 = hi(t[0] * k0 * n[1]) + lo(t[0] * k0 * n[2]) + t[2]
movq 40(%rsp),%rsi // rsi = k0
movq %rdx,%r15 // r15 = hi(t[0] * k0 * n[2])
movq 24(%rbp),%rax // rax = n[3]
mulq %rbx // (rdx, rax) = t[0] * k0 * n[3]
ADD_CARRY %rax,%r14 // r14 = lo(t[0] * k0 * n[3])
imulq %r8,%rsi // rsi = k0 * r8
ADD_CARRY %r14,%r15 // r15 = hi(t[0] * k0 * n[2]) + lo(t[0] * k0 * n[3])
movq %rdx,%r14 // r14 = hi(t[0] * k0 * n[3])
movq 32(%rbp),%rax // rax = n[4]
mulq %rbx // (rdx, rax) = t[0] * k0 * n[4]
ADD_CARRY %rax,%r13 // r13 = lo(t[0] * k0 * n[4]) + t[4]
ADD_CARRY %r13,%r14 // r14 = hi(t[0] * k0 * n[3]) + lo(t[0] * k0 * n[4]) + t[4]、
movq 40(%rbp),%rax // rax = n[5]
movq %rdx,%r13 // r13 = hi(t[0] * k0 * n[4])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[5]
ADD_CARRY %rax,%r12 // r12 = lo(t[0] * k0 * n[5]) + t[5]
ADD_CARRY %r12,%r13 // r13 = hi(t[0] * k0 * n[4]) + lo(t[0] * k0 * n[5]) + t[5]
movq 48(%rbp),%rax // rax = n[6]
movq %rdx,%r12 // r12 = hi(t[0] * k0 * n[5])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[6]
ADD_CARRY %rax,%r11 // r11 = lo(t[0] * k0 * n[6]) + t[6]
ADD_CARRY %r11,%r12 // r12 = hi(t[0] * k0 * n[5]) + lo(t[0] * k0 * n[6]) + t[6]
movq 56(%rbp),%rax // rax = n[7]
movq %rdx,%r11 // r11 = hi(t[0] * k0 * n[6])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[7]
movq %rsi,%rbx // rbx = k0 * r8
ADD_CARRY %rax,%r10 // r10 = lo(t[0] * k0 * n[7]) + t[7]
ADD_CARRY %r10,%r11 // r11 = hi(t[0] * k0 * n[6]) + lo(t[0] * k0 * n[7]) + t[7]
movq %rdx,%r10 // r10 = hi(t[0] * k0 * n[7])
decl %ecx // ecx--
jnz .LoopReduce8x // if ecx != 0
leaq 64(%rbp),%rbp // rbp += 64, n Pointer Offset.
xorq %rax,%rax // rax = 0
movq 16(%rsp),%rdx // rdx = t[size * 2]
cmpq 8(%rsp),%rbp // rbp = n[size]
jae .LoopEndCondMul8x
addq (%rdi),%r8 // r8 += t[0]
adcq 8(%rdi),%r9 // r9 += t[1]
adcq 16(%rdi),%r15 // r15 += t[2]
adcq 24(%rdi),%r14 // r14 += t[3]
adcq 32(%rdi),%r13 // r13 += t[4]
adcq 40(%rdi),%r12 // r12 += t[5]
adcq 48(%rdi),%r11 // r11 += t[6]
adcq 56(%rdi),%r10 // r10 += t[7]
sbbq %rsi,%rsi // rsi = -CF
movq 112(%rsp),%rbx // rbx = t[0] * k0, 48 + 56 + 8
movl $8,%ecx
.align 32
.LoopLastSqr8x:
movq (%rbp),%rax // rax = n[0]
mulq %rbx // (rdx, rax) = t[0] * k0 * n[0]
ADD_CARRY %rax,%r8 // r8 += lo(t[0] * k0 * n[0])
movq %r8,(%rdi) // t[0] = r8
leaq 8(%rdi),%rdi // t++
movq %rdx,%r8 // r8 = hi(t[0] * k0 * n[0])
movq 8(%rbp),%rax // rax = n[1]
mulq %rbx // (rdx, rax) = t[0] * k0 * n[1]
ADD_CARRY %rax,%r9 // r9 += lo(t[0] * k0 * n[1])
ADD_CARRY %r9,%r8 // r8 = hi(t[0] * k0 * n[0]) + r9
movq 16(%rbp),%rax // rax = n[2]
movq %rdx,%r9 // r9 = hi(t[0] * k0 * n[1])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[2]
ADD_CARRY %rax,%r15 // r15 += lo(t[0] * k0 * n[2])
ADD_CARRY %r15,%r9 // r9 = hi(t[0] * k0 * n[1]) + r10
movq 24(%rbp),%rax // rax = n[3]
movq %rdx,%r15 // r15 = hi(t[0] * k0 * n[2])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[3]
ADD_CARRY %rax,%r14 // r14 += lo(t[0] * k0 * n[3])
ADD_CARRY %r14,%r15 // r15 = hi(t[0] * k0 * n[2]) + r11
movq 32(%rbp),%rax // rax = n[4]
movq %rdx,%r14 // r14 = hi(t[0] * k0 * n[3])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[4]
ADD_CARRY %rax,%r13 // r13 += lo(t[0] * k0 * n[4])
ADD_CARRY %r13,%r14 // r14 = hi(t[0] * k0 * n[3]) + r12
movq 40(%rbp),%rax // rax = n[5]
movq %rdx,%r13 // r13 = hi(t[0] * k0 * n[4])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[5]
ADD_CARRY %rax,%r12 // r12 += lo(t[0] * k0 * n[5])
ADD_CARRY %r12,%r13 // r13 = hi(t[0] * k0 * n[4]) + r13
movq 48(%rbp),%rax // rax = n[6]
movq %rdx,%r12 // r12 = hi(t[0] * k0 * n[5])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[6]
ADD_CARRY %rax,%r11 // r11 += lo(t[0] * k0 * n[6])
ADD_CARRY %r11,%r12 // r12 = hi(t[0] * k0 * n[5]) + r14
movq 56(%rbp),%rax // rax = n[7]
movq %rdx,%r11 // r11 = hi(t[0] * k0 * n[6])
mulq %rbx // (rdx, rax) = t[0] * k0 * n[7]
movq 40(%rsp,%rcx,8),%rbx // rbx = t[i] * k0
ADD_CARRY %rax,%r10 // r10 += lo(t[0] * k0 * n[7])
ADD_CARRY %r10,%r11 // r11 = hi(t[0] * k0 * n[6]) + r10
movq %rdx,%r10 // r10 = hi(t[0] * k0 * n[7])
decl %ecx // ecx--
jnz .LoopLastSqr8x // if ecx != 0
leaq 64(%rbp),%rbp // n += 8
movq 16(%rsp),%rdx // rdx = t[size * 2]
cmpq 8(%rsp),%rbp // Check whether rbp is at the end of the n array. If yes, exit the loop.
jae .LoopSqrBreak8x
movq 112(%rsp),%rbx // rbx = t[0] * k0
negq %rsi // rsi = CF
movq (%rbp),%rax // rax = = n[0]
adcq (%rdi),%r8 // r8 = t[0]
adcq 8(%rdi),%r9 // r9 = t[1]
adcq 16(%rdi),%r15 // r15 = t[2]
adcq 24(%rdi),%r14 // r14 = t[3]
adcq 32(%rdi),%r13 // r13 = t[4]
adcq 40(%rdi),%r12 // r12 = t[5]
adcq 48(%rdi),%r11 // r11 = t[6]
adcq 56(%rdi),%r10 // r10 = t[7]
sbbq %rsi,%rsi // rsi = -CF
movl $8,%ecx // ecx = 8
jmp .LoopLastSqr8x
.align 32
.LoopSqrBreak8x:
xorq %rax,%rax // rax = 0
addq (%rdx),%r8 // r8 += Highest carry bit.
adcq $0,%r9 // r9 += CF
adcq $0,%r15 // r15 += CF
adcq $0,%r14 // r14 += CF
adcq $0,%r13 // r13 += CF
adcq $0,%r12 // r12 += CF
adcq $0,%r11 // r11 += CF
adcq $0,%r10 // r10 += CF
adcq $0,%rax // rax += CF
negq %rsi // rsi = CF
.LoopEndCondMul8x:
adcq (%rdi),%r8 // r8 += t[0]
adcq 8(%rdi),%r9 // r9 += t[1]
adcq 16(%rdi),%r15 // r15 += t[2]
adcq 24(%rdi),%r14 // r14 += t[3]
adcq 32(%rdi),%r13 // r13 += t[4]
adcq 40(%rdi),%r12 // r12 += t[5]
adcq 48(%rdi),%r11 // r11 += t[6]
adcq 56(%rdi),%r10 // r10 += t[7]
adcq $0,%rax // rax += CF
movq -8(%rbp),%rcx // rcx = n[7]
xorq %rsi,%rsi // rsi = 0
movq %xmm1,%rbp // rbp = n
movq %r8,(%rdi) // Save the calculated result back to t[].
movq %r9,8(%rdi)
movq %xmm5,%r9
movq %r15,16(%rdi)
movq %r14,24(%rdi)
movq %r13,32(%rdi)
movq %r12,40(%rdi)
movq %r11,48(%rdi)
movq %r10,56(%rdi)
leaq 64(%rdi),%rdi // t += 8
cmpq %rdx,%rdi // Cycle the entire t[].
jb .LoopReduceSqr8x
ret
.cfi_endproc
.size MontSqr8Inner,.-MontSqr8Inner
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/asm/bn_mont_x86_64.S | Unix Assembly | unknown | 61,353 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
.file "bn_mont_x86_64.S"
.text
// ADD_CARRY a b: b += a, then fold the resulting carry flag into %rdx.
// Used after mulq so that %rdx accumulates hi(product) + carry-out of the add.
// NOTE: clobbers CF and assumes %rdx holds the running high word.
.macro ADD_CARRY a b
addq \a,\b // b += a, may set CF
adcq $0,%rdx // rdx += CF (propagate the carry into the high word)
.endm
// SAVE_REGISTERS: push all callee-saved (non-volatile) GPRs per the SysV
// x86-64 ABI. Must be paired with RESTORE_REGISTERS, which pops in the
// exact reverse order.
.macro SAVE_REGISTERS
pushq %r15 // Save non-volatile register.
pushq %r14
pushq %r13
pushq %r12
pushq %rbp
pushq %rbx
.endm
// RESTORE_REGISTERS: pop the callee-saved GPRs in the reverse order of
// SAVE_REGISTERS, restoring the caller's register state before ret.
.macro RESTORE_REGISTERS
popq %rbx // Restore non-volatile register.
popq %rbp
popq %r12
popq %r13
popq %r14
popq %r15
.endm
/*
* void MontMulx_Asm(uint64_t *r, const uint64_t *a, const uint64_t *b,
* const uint64_t *n, const uint64_t k0, uint32_t size);
*/
// Montgomery multiplication entry point / dispatcher.
// Args (SysV ABI): rdi = r (result), rsi = a, rdx = b, rcx = n (modulus),
// r8 = k0 (Montgomery constant -n^-1 mod 2^64), r9 = size (in words).
// Dispatches to MontSqr8x (a == b, size % 8 == 0), MontMul4x (size % 4 == 0,
// size >= 8), or falls through to the generic schoolbook loop below.
.globl MontMulx_Asm
.type MontMulx_Asm,@function
.align 16
MontMulx_Asm:
.cfi_startproc
testl $3,%r9d
jnz .LMontMul // If size is not divisible by 4, use the generic path.
cmpl $8,%r9d
jb .LMontMul // If size < 8, use the generic path.
cmpq %rsi,%rdx
jne MontMul4x // if a != b, MontMul4x
testl $7,%r9d
jz MontSqr8x // If size is divisible by 8 (and a == b), enter MontSqr8x.
jmp MontMul4x
.align 16
.LMontMul:
SAVE_REGISTERS // Save non-volatile register.
movq %rsp,%rax // rax stores the rsp
movq %r9, %r15
negq %r15 // r15 = -size
leaq -16(%rsp, %r15, 8), %r15 // r15 = rsp - size * 8 - 16
andq $-1024, %r15 // r15 The address is aligned down by 1 KB.
movq %rsp, %r14 // r14 = rsp
subq %r15,%r14 // __chkstk implementation, called when the stack size needs to exceed 4096
// (the size of a page) to allocate more pages.
andq $-4096,%r14 // r14 4K down-align.
leaq (%r15,%r14),%rsp // rsp = r15 + r14
cmpq %r15,%rsp // If more than one page must be allocated, walk pages in .LoopPage.
ja .LoopPage
jmp .LMulBody
.align 16
.LoopPage:
leaq -4096(%rsp),%rsp // rsp -= 4096 each time until rsp <= r15 (touch each page).
cmpq %r15,%rsp
ja .LoopPage
.LMulBody:
movq %rax,8(%rsp,%r9,8) // Save the original rsp in the stack.
movq %rdx,%r13 // r13 = b
xorq %r11,%r11 // r11 = 0 (outer index i)
xorq %r10,%r10 // r10 = 0 (inner index j)
movq (%r13),%rbx // rbx = b[0]
movq (%rsi),%rax // rax = a[0]
mulq %rbx // (rdx, rax) = a[0] * b[0]
movq %rax,%r15 // r15 = t[0] = lo(a[0] * b[0])
movq %rdx,%r14 // r14 = hi(a[0] * b[0])
movq %r8,%rbp // rbp = k0
imulq %r15,%rbp // rbp = t[0] * k0
movq (%rcx),%rax // rax = n[0]
mulq %rbp // (rdx, rax) = t[0] * k0 * n[0]
ADD_CARRY %rax,%r15 // r15 = lo(t[0] * k0 * n[0]) + t[0]
leaq 1(%r10),%r10 // j++
.Loop1st:
movq (%rsi,%r10,8),%rax // rax = a[j]
movq %rdx,%r12 // r12 = hi(t[0] * k0 * n[0])
mulq %rbx // (rdx, rax) = a[j] * b[0]
ADD_CARRY %rax,%r14 // r14 = hi(a[j - 1] * b[0]) + lo(a[j] * b[0])
movq %rdx,%r15 // r15 = hi(a[j] * b[0])
movq (%rcx,%r10,8),%rax // rax = n[j]
mulq %rbp // (rdx, rax) = t[0] * k0 * n[j]
leaq 1(%r10),%r10 // j++
cmpq %r9,%r10 // if j != size, loop .Loop1st
je .Loop1stSkip
ADD_CARRY %rax,%r12 // r12 = hi(t[0] * k0 * n[j]) + lo(t[0] * k0 * n[j])
ADD_CARRY %r14,%r12 // r12 += lo(a[j] * b[0]) + hi(a[j] * b[0])
movq %r12,-16(%rsp,%r10,8) // t[j - 2] = r12
movq %r15,%r14 // r14 = hi(a[j] * b[0])
jmp .Loop1st
.Loop1stSkip:
ADD_CARRY %rax,%r12 // r12 = hi(t[0] * k0 * n[j - 1]) + lo(t[0] * k0 * n[j])
ADD_CARRY %r14,%r12 // r12 += hi(a[j - 1] * b[0]) + lo(a[j] * b[0])
movq %r12,-16(%rsp,%r10,8) // t[j - 2] = r12
movq %r15,%r14 // r14 = hi(a[j] * b[0])
movq %rdx,%r12 // r12 = hi(t[0] * k0 * n[j])
xorq %rdx,%rdx // rdx = 0, Clearing the CF.
ADD_CARRY %r14,%r12 // r12 = hi(t[0] * k0 * n[j]) + hi(a[j] * b[0])
movq %r12,-8(%rsp,%r9,8) // t[size - 1] = hi(t[0] * k0 * n[j]) + hi(a[j] * b[0]), save overflow bit.
movq %rdx,(%rsp,%r9,8)
leaq 1(%r11),%r11 // i++
.align 16
.LoopOuter:
xorq %r10,%r10 // j = 0
movq (%rsi),%rax // rax = a[0]
movq (%r13,%r11,8),%rbx // rbx = b[i]
mulq %rbx // (rdx, rax) = a[0] * b[i]
movq (%rsp),%r15 // r15 = lo(a[0] * b[i]) + t[0]
ADD_CARRY %rax,%r15
movq %rdx,%r14 // r14 = hi(a[0] * b[i])
movq %r8,%rbp // rbp = t[0] * k0
imulq %r15,%rbp
movq (%rcx),%rax // rax = n[0]
mulq %rbp // (rdx, rax) = t[0] * k0 * n[0]
ADD_CARRY %rax,%r15 // r15 = lo(t[0] * k0 * n[0])
leaq 1(%r10),%r10 // j++
.align 16
.LoopInner:
movq (%rsi,%r10,8),%rax // rax = a[j]
movq %rdx,%r12 // r12 = hi(t[0] * k0 * n[j])
movq (%rsp,%r10,8),%r15 // r15 = t[j]
mulq %rbx // (rdx, rax) = a[1] * b[i]
ADD_CARRY %rax,%r14 // r14 = hi(a[0] * b[i]) + lo(a[1] * b[i])
movq (%rcx,%r10,8),%rax // rax = n[j]
ADD_CARRY %r14,%r15 // r15 = a[j] * b[i] + t[j]
movq %rdx,%r14
leaq 1(%r10),%r10 // j++
mulq %rbp // (rdx, rax) = t[0] * k0 * n[j]
cmpq %r9,%r10 // if j != size, loop .LoopInner
je .LoopInnerSkip
ADD_CARRY %rax,%r12 // r12 = t[0] * k0 * n[j]
ADD_CARRY %r15,%r12 // r12 = a[j] * b[i] + t[j] + n[j] * t[0] * k0
movq %r12,-16(%rsp,%r10,8) // t[j - 2] = r12
jmp .LoopInner
.LoopInnerSkip:
ADD_CARRY %rax,%r12 // r12 = t[0] * k0 * n[j]
ADD_CARRY %r15,%r12 // r12 = t[0] * k0 * n[j] + a[j] * b[i] + t[j]
movq (%rsp,%r10,8),%r15 // r15 = t[j]
movq %r12,-16(%rsp,%r10,8) // t[j - 2] = r12
movq %rdx,%r12 // r12 = hi(t[0] * k0 * n[j])
xorq %rdx,%rdx // rdx = 0
ADD_CARRY %r14,%r12 // r12 = hi(a[1] * b[i]) + hi(t[0] * k0 * n[j])
ADD_CARRY %r15,%r12 // r12 += t[j]
movq %r12,-8(%rsp,%r9,8) // t[size - 1] = r12
movq %rdx,(%rsp,%r9,8) // t[size] = CF
leaq 1(%r11),%r11 // i++
cmpq %r9,%r11 // if i < size (unsigned), run next outer iteration
jne .LoopOuter
// Conditional final subtraction: compute r = t - n, then constant-time
// select between t and t - n depending on the borrow (no data-dependent branch).
xorq %r11,%r11 // r11 = 0, clear CF.
movq (%rsp),%rax // rax = t[0]
movq %r9,%r10 // r10 = size
.align 16
.LoopSub:
sbbq (%rcx,%r11,8),%rax // r[i] = t[i] - n[i]
movq %rax,(%rdi,%r11,8)
movq 8(%rsp,%r11,8),%rax // rax = t[i + 1]
leaq 1(%r11),%r11 // i++
decq %r10 // j--
jnz .LoopSub // if j != 0
sbbq $0,%rax // rax -= CF (rax = 0 or -1 selection mask)
movq $-1,%rbx
xorq %rax,%rbx // rbx = ~mask
xorq %r11,%r11 // r11 = 0
movq %r9,%r10 // r10 = size
.LoopCopy:
movq (%rdi,%r11,8),%rcx // rcx = r[i] & ~mask
andq %rbx,%rcx
movq (%rsp,%r11,8),%rdx // rdx = t[i] & mask
andq %rax,%rdx
orq %rcx,%rdx
movq %rdx,(%rdi,%r11,8) // r[i] = selected result
movq %r9,(%rsp,%r11,8) // scrub t[i] (overwrite temp buffer with size)
leaq 1(%r11),%r11 // i++
subq $1,%r10 // j--
jnz .LoopCopy // if j != 0
movq 8(%rsp,%r9,8),%rsi // rsi = saved original rsp.
movq $1,%rax // rax = 1 (success)
leaq (%rsi),%rsp // restore rsp.
RESTORE_REGISTERS // Restore non-volatile register.
ret
.cfi_endproc
.size MontMulx_Asm,.-MontMulx_Asm
// MontMul4x: Montgomery multiplication, 4 words per iteration, using the
// MULX/ADCX/ADOX dual-carry-chain instructions (BMI2 + ADX).
// Same argument layout as MontMulx_Asm; requires size % 4 == 0 and size >= 8.
.type MontMul4x,@function
.align 16
MontMul4x:
.cfi_startproc
SAVE_REGISTERS
movq %rsp,%rax // save rsp
movq %r9,%r15
negq %r15
leaq -48(%rsp,%r15,8),%r15 // Allocate space: size * 8 + 48 bytes.
andq $-1024,%r15
movq %rsp,%r14
subq %r15,%r14 // __chkstk implementation, called when the stack size needs to exceed 4096.
andq $-4096,%r14
leaq (%r15,%r14),%rsp
cmpq %r15,%rsp // If more than one page must be allocated, go to .LoopPage4x.
ja .LoopPage4x
jmp .LoopMul4x
.LoopPage4x:
leaq -4096(%rsp),%rsp // rsp -= 4096 each time until rsp <= r15 (touch each page).
cmpq %r15,%rsp
ja .LoopPage4x
.LoopMul4x:
movq %rax, 0(%rsp) // save stack pointer
movq %rdi, 8(%rsp) // save r
movq %r8, 16(%rsp) // save k0
movq %r9, %r10
shrq $2, %r9
decq %r9
movq %r9, 24(%rsp) // save (size/4) - 1
shlq $3, %r10
movq %rdx, %r12 // r12 = b
movq %r10, 32(%rsp) // save (size * 8) -> bytes
addq %r10, %r12 // r12 = loc(b[size - 1])
leaq 80(%rsp),%rbp // rbp: start position of the tmp buffer
movq %rdx,%r13 // r13 = b
movq %r12, 40(%rsp) // save loc(b + size * 8)
movq (%r13),%rdx // rdx = b[0]
// cal a[0 ~ 3] * b[0]
mulx (%rsi), %r12, %r14 // r14 = hi(a[0] * b[0]), r12 = lo(b[0] * a[0])
mulx 8(%rsi), %rax, %r15 // (r15, rax) = a[1] * b[0]
addq %rax, %r14 // r14 = hi(a[0] * b[0]) + lo(a[1] * b[0])
mulx 16(%rsi), %rax, %r11 // (rax, r11) = a[2] * b[0]
adcq %rax, %r15 // r15 = hi(a[1] * b[0]) + lo(a[2] * b[0])
adcq $0, %r11 // r11 = hi(a[2] * b[0]) + CF
imulq %r12,%r8 // r8 = t[0] * k0, will change CF
xorq %r10,%r10 // get r10 = 0
mulx 24(%rsi), %rax, %rbx // (rax, rbx) = a[3] * b[0]
movq %r8, %rdx // rdx = t[0] * k0 = m'
adcx %rax, %r11 // r11 = hi(a[2] * b[0]) + lo(a[3] * b[0])
adcx %r10, %rbx // rbx = hi(a[3] * b[0])
// cal n[0 ~ 3] * t[0] * k0
mulx (%rcx), %rax, %rdi // (rdi, rax) = n[0] * m'
adcx %rax, %r12 // r12 = lo(b[0] * a[0]) + lo(n[0] * m')
adox %r14, %rdi // rdi = hi(n[0] * m') + hi(a[0] * b[0]) + lo(a[1] * b[0])
mulx 8(%rcx), %rax, %r14 // (r14, rax) = n[1] * m'
adcx %rax, %rdi
adox %r15, %r14 // r14 = hi(a[1] * b[0]) + lo(a[2] * b[0]) + hi(n[1] * m')
movq %rdi, -32(%rbp)
mulx 16(%rcx), %rax, %r15 // (r15, rax) = n[2] * m'
adcx %rax, %r14
adox %r11, %r15 // r15 = hi(a[2] * b[0]) + lo(a[3] * b[0]) + hi(n[2] * m')
movq %r14, -24(%rbp)
mulx 24(%rcx), %rax, %r11 // (r11, rax) = n[3] * m'
adcx %rax, %r15
adox %r10, %r11 // r11 = hi(n[3] * m')
movq %r15, -16(%rbp)
leaq 4*8(%rsi),%rsi // a offset 4 blocks
leaq 4*8(%rcx),%rcx // n offset 4 blocks
movq (%r13),%rdx // rdx = b[0]
.align 16
.Loop1st4x:
mulx (%rsi), %r12, %r14 // r14 = hi(a[4] * b[0]), r12 = lo(a[4] * b[0])
adcx %r10, %r11 // r11 += carry
mulx 8(%rsi), %rax, %r15 // r15 = hi(a[5] * b[0]), rax = lo(a[5] * b[0])
adcx %rbx, %r12 // r12 = hi(a[3] * b[0]) + lo(a[4] * b[0])
adcx %rax, %r14 // r14 = hi(a[4] * b[0]) + lo(a[5] * a[0])
mulx 16(%rsi), %rax, %rdi // rdi = hi(a[6] * b[0]), rax = lo(a[6] * b[0])
adcx %rax, %r15 // r15 = hi(a[5] * b[0]) + lo(a[6] * a[0])
mulx 24(%rsi), %rax, %rbx // rbx = hi(a[7] * b[0]), rax = lo(a[7] * b[0])
adcx %rax, %rdi // rdi = hi(a[6] * b[0]) + lo(a[7] * b[0])
adcx %r10, %rbx // rbx = hi(a[7] * b[0]) + CF
movq %r8, %rdx
adox %r11,%r12 // r12 = hi(a[3] * b[0]) + lo(b[4] * a[0]) + hi(n[3] * m')
mulx (%rcx), %rax, %r11 // (rax, r11) = n[4] * m'
leaq 4*8(%rsi), %rsi // a offset 4 blocks
adcx %rax,%r12 // r12 = hi(a[3] * b[0]) + lo(b[4] * a[0])
// + hi(n[3] * m') + lo(n[4] * m')
adox %r14, %r11 // r11 = hi(a[4] * b[0]) + lo(a[5] * b[0]) + hi(n[4] * m')
mulx 8(%rcx), %rax, %r14 // (rax, r14) = n[5] * m'
leaq 4*8(%rbp), %rbp // tmp offset 4 blocks
adcx %rax, %r11 // r11 = hi(a[4] * b[0]) + lo(a[5] * b[0])
// + hi(n[4] * m') + lo(n[5] * m')
adox %r15, %r14 // r14 = hi(a[5] * b[0]) + lo(a[6] * a[0])
// + hi(n[5] * m')
mulx 16(%rcx), %rax, %r15 // (rax, r15) = n[6] * m'
movq %r12, -5*8(%rbp)
adcx %rax, %r14 // r14 = hi(a[5] * b[0]) + lo(a[6] * a[0])
// + hi(n[5] * m') + lo(n[6] * m')
adox %rdi, %r15 // r15 = hi(a[6] * b[0]) + lo(a[7] * b[0])
// + hi(n[6] * m')
movq %r11, -4*8(%rbp)
mulx 24(%rcx), %rax, %r11 // (rax, r11) = n[7] * m'
movq %r14, -3*8(%rbp)
adcx %rax, %r15 // r15 = hi(a[6] * b[0]) + lo(a[7] * b[0])
// + hi(n[6] * m') + lo(n[7] * m')
adox %r10, %r11
movq %r15, -2*8(%rbp)
leaq 4*8(%rcx), %rcx // n offset 4 blocks
movq (%r13),%rdx // recover rdx
dec %r9
jnz .Loop1st4x
movq 32(%rsp), %r15 // r15 = size * 8
leaq 8(%r13), %r13 // b offset 1 blocks
adcx %r10, %r11 // hi(n[7] * m') + CF, here OF and CF are both folded in.
addq %r11, %rbx // hi(a[7] * b[0]) + hi(n[7] * m')
sbbq %r11,%r11 // r11 = -CF (record the top carry)
movq %rbx, -1*8(%rbp)
.align 4
.LoopOuter4x:
// cal a[0 ~ 3] * b[i]
movq (%r13),%rdx // rdx = b[i]
mov %r11, (%rbp) // keep the highest carry
subq %r15, %rsi // get a[0]
subq %r15, %rcx // get n[0]
leaq 80(%rsp),%rbp // get tmp[0]
// from here, a[0 ~ 3] * b[i] needs to add tmp
mulx (%rsi), %r12, %r14 // r14 = hi(a[0] * b[i]), r12 = lo(b[i] * a[0])
xorq %r10,%r10 // get r10 = 0, and clear CF OF
mulx 8(%rsi), %rax, %r15 // (r15, rax) = a[1] * b[i]
adox -4*8(%rbp), %r12 // lo(a[0] * b[i]) + tmp[0]
adcx %rax, %r14 // r14 = hi(a[0] * b[i]) + lo(a[1] * b[i])
mulx 16(%rsi), %rax, %r11 // (rax, r11) = a[2] * b[i]
adox -3*8(%rbp),%r14 // r14 = hi(a[0] * b[i]) + lo(a[1] * b[i]) + tmp[1]
adcx %rax, %r15 // r15 = hi(a[1] * b[i]) + lo(a[2] * b[i])
mulx 24(%rsi), %rax, %rbx // (rax, rbx) = a[3] * b[i]
adox -2*8(%rbp),%r15 // r15 = hi(a[1] * b[i]) + lo(a[2] * b[i]) + tmp[2]
adcx %rax, %r11 // r11 = hi(a[2] * b[i]) + lo(a[3] * b[i])
adox -1*8(%rbp),%r11 // r11 = hi(a[2] * b[i]) + lo(a[3] * b[i]) + tmp[3]
adcx %r10,%rbx
movq %r12, %rdx
adox %r10,%rbx
imulq 16(%rsp),%rdx // 16(%rsp) holds k0; rdx = t[0] * k0 = m'. imulq will change CF.
mulx (%rcx), %rax, %r8 // (rax, r8) = n[0] * m'
xorq %r10, %r10 // clear CF
adcx %rax, %r12 // r12 = lo(b[i] * a[0]) + lo(n[0] * m')
adox %r14, %r8 // r8 = hi(n[0] * m') + hi(a[0] * b[i]) + lo(a[1] * b[i])
mulx 8(%rcx), %rax, %rdi // (rdi, rax) = n[1] * m'
leaq 4*8(%rsi),%rsi // a offsets 4
adcx %rax, %r8
adox %r15, %rdi // rdi = hi(a[1] * b[i]) + lo(a[2] * b[i]) + hi(n[1] * m')
mulx 16(%rcx), %rax, %r15 // (r15, rax) = n[2] * m'
movq %r8, -32(%rbp)
adcx %rax, %rdi
adox %r11, %r15 // r15 = hi(a[2] * b[i]) + lo(a[3] * b[i]) + hi(n[2] * m')
mulx 24(%rcx), %rax, %r11 // (r11, rax) = n[3] * m'
movq %rdi, -24(%rbp)
adcx %rax, %r15
adox %r10, %r11 // r11 = hi(n[3] * m')
movq %r15, -16(%rbp)
leaq 4*8(%rcx),%rcx // n offsets 4
movq %rdx, %r8 // r8 = t[0] * k0 = m'
movq (%r13), %rdx // rdx = b[i]
movq 24(%rsp), %r9
.align 16
.Linner4x:
mulx (%rsi), %r12, %r14 // r14 = hi(a[4] * b[i]), r12 = lo(a[4] * b[i])
adcx %r10, %r11 // carry of previous round
adox %rbx, %r12 // r12 = hi(a[3] * b[i]) + lo(a[4] * b[i])
mulx 8(%rsi), %rax, %r15 // r15 = hi(a[5] * b[i]), rax = lo(a[5] * b[i])
adcx (%rbp), %r12 // r12 = hi(a[3] * b[i]) + lo(a[4] * b[i]) + tmp[4]
// (so the tmp pointer is NOT advanced here)
adox %rax, %r14 // r14 = hi(a[4] * b[i]) + lo(a[5] * b[i])
mulx 16(%rsi), %rax, %rdi // rdi = hi(a[6] * b[i]), rax = lo(a[6] * b[i])
adcx 8(%rbp), %r14 // r14 = hi(a[4] * b[i]) + lo(a[5] * b[i]) + tmp[5]
adox %rax, %r15 // r15 = hi(a[5] * b[i]) + lo(a[6] * b[i])
mulx 24(%rsi), %rax, %rbx // rbx = hi(a[7] * b[i]), rax = lo(a[7] * b[i])
adcx 16(%rbp), %r15 // r15 = hi(a[5] * b[i]) + lo(a[6] * b[i]) + tmp[6]
adox %rax, %rdi // rdi = hi(a[6] * b[i]) + lo(a[7] * b[i])
adox %r10, %rbx // rbx += OF
adcx 24(%rbp), %rdi // rdi = hi(a[6] * b[i]) + lo(a[7] * b[i]) + tmp[7]
adcx %r10, %rbx // rbx += CF
// update rdx, begin cal n[i] * k0 * m
adox %r11,%r12 // r12 = hi(a[3] * b[i]) + lo(a[4] * b[i]) + hi(n[3] * m')
movq %r8, %rdx
mulx (%rcx), %rax, %r11 // (rax, r11) = n[4] * m'
leaq 4*8(%rbp), %rbp // tmp offsets 4
adcx %rax,%r12 // r12 = hi(a[3] * b[i]) + lo(b[4] * a[i])
// + hi(n[3] * m') + lo(n[4] * m')
adox %r14, %r11 // r11 = hi(a[4] * b[i]) + lo(a[5] * b[i]) + hi(n[4] * m')
mulx 8(%rcx), %rax, %r14 // (rax, r14) = n[5] * m'
leaq 4*8(%rsi), %rsi // a offsets 4
adcx %rax, %r11 // r11 = hi(a[4] * b[i]) + lo(a[5] * b[i])
// + hi(n[4] * m') + lo(n[5] * m')
adox %r15, %r14 // r14 = hi(a[5] * b[i]) + lo(a[6] * a[i])
// + hi(n[5] * m')
mulx 16(%rcx), %rax, %r15 // (rax, r15) = n[6] * m'
movq %r12, -5*8(%rbp)
adcx %rax, %r14 // r14 = hi(a[5] * b[i]) + lo(a[6] * b[i])
// + hi(n[5] * m') + lo(n[6] * m')
movq %r11, -4*8(%rbp)
adox %rdi, %r15 // r15 = hi(a[6] * b[i]) + lo(a[7] * b[i])
// + hi(n[6] * m')
mulx 24(%rcx), %rax, %r11 // (rax, r11) = n[7] * m'
movq %r14, -3*8(%rbp)
adcx %rax, %r15 // r15 = hi(a[6] * b[0]) + lo(a[7] * b[0])
// + hi(n[6] * m') + lo(n[7] * m')
adox %r10, %r11
movq %r15, -2*8(%rbp)
leaq 4*8(%rcx), %rcx // n offsets 4
movq (%r13), %rdx
dec %r9
jnz .Linner4x
movq 32(%rsp), %r15 // r15 = size * 8
leaq 8(%r13), %r13 // b offsets 1.
adcx %r10, %r11 // hi(n[7] * m') + OF + CF
subq 0*8(%rbp), %r10
adcx %r11, %rbx // hi(a[7] * b[0]) + hi(n[7] * m')
sbbq %r11,%r11
movq %rbx, -1*8(%rbp)
cmp 40(%rsp), %r13
jne .LoopOuter4x
// Final reduction: compute tmp - n into r, then constant-time select
// between tmp and tmp - n based on the final borrow.
leaq 48(%rsp),%rbp // rbp = tmp[0]
subq %r15, %rcx // rcx = n[0]
negq %r11
movq 24(%rsp), %rdx // rdx = size/4
movq 8(%rsp), %rdi // get r[0]
// cal tmp - n
movq 0(%rbp), %rax // rax = tmp[0]
movq 8(%rbp), %rbx // rbx = tmp[1]
movq 16(%rbp), %r10 // r10 = tmp[2]
movq 24(%rbp), %r12 // r12 = tmp[3]
leaq 32(%rbp), %rbp // tmp += 4
subq 0(%rcx), %rax // tmp[0] - n[0]
sbbq 8(%rcx), %rbx // tmp[1] - n[1]
sbbq 16(%rcx), %r10 // tmp[2] - n[2]
sbbq 24(%rcx), %r12 // tmp[3] - n[3]
leaq 32(%rcx), %rcx // n += 4
movq %rax, 0(%rdi) // r save the tmp - n
movq %rbx, 8(%rdi)
movq %r10, 16(%rdi)
movq %r12, 24(%rdi)
leaq 32(%rdi), %rdi // r += 4
.LoopSub4x:
movq 0(%rbp), %rax // rax = tmp[0]
movq 8(%rbp), %rbx // rbx = tmp[1]
movq 16(%rbp), %r10 // r10 = tmp[2]
movq 24(%rbp), %r12 // r12 = tmp[3]
leaq 32(%rbp), %rbp
sbbq 0(%rcx), %rax // tmp[0] - n[0]
sbbq 8(%rcx), %rbx // tmp[1] - n[1]
sbbq 16(%rcx), %r10 // tmp[2] - n[2]
sbbq 24(%rcx), %r12 // tmp[3] - n[3]
leaq 32(%rcx), %rcx
movq %rax, 0(%rdi)
movq %rbx, 8(%rdi)
movq %r10, 16(%rdi)
movq %r12, 24(%rdi)
leaq 32(%rdi), %rdi
decq %rdx // j--
jnz .LoopSub4x // if j != 0
sbbq $0,%r11 // cancellation of highest carry
subq %r15, %rbp // rbp = tmp[0]
subq %r15, %rdi // rdi = r[0]
movq 24(%rsp), %r10 // r10 = size/4 - 1
pxor %xmm2,%xmm2 // xmm2 = 0
movq %r11, %xmm0
pcmpeqd %xmm1,%xmm1 // xmm1 = all-ones
pshufd $0,%xmm0,%xmm0
pxor %xmm0,%xmm1 // xmm0 / xmm1 = complementary select masks
xorq %rax,%rax
movdqa (%rbp,%rax),%xmm5 // Copy the selected result to r (masked merge), zeroing tmp.
movdqu (%rdi,%rax),%xmm3
pand %xmm0,%xmm5
pand %xmm1,%xmm3
movdqa 16(%rbp,%rax),%xmm4
movdqu %xmm2,(%rbp,%rax)
por %xmm3,%xmm5
movdqu 16(%rdi,%rax),%xmm3
movdqu %xmm5,(%rdi,%rax)
pand %xmm0,%xmm4
pand %xmm1,%xmm3
movdqa %xmm2,16(%rbp,%rax)
por %xmm3,%xmm4
movdqu %xmm4,16(%rdi,%rax)
leaq 32(%rax),%rax
.align 16
.LoopCopy4x:
movdqa (%rbp,%rax),%xmm5
movdqu (%rdi,%rax),%xmm3
pand %xmm0,%xmm5
pand %xmm1,%xmm3
movdqa 16(%rbp,%rax),%xmm4
movdqu %xmm2,(%rbp,%rax)
por %xmm3,%xmm5
movdqu 16(%rdi,%rax),%xmm3
movdqu %xmm5,(%rdi,%rax)
pand %xmm0,%xmm4
pand %xmm1,%xmm3
movdqa %xmm2,16(%rbp,%rax)
por %xmm3,%xmm4
movdqu %xmm4,16(%rdi,%rax)
leaq 32(%rax),%rax
decq %r10 // j--
jnz .LoopCopy4x
movq 0(%rsp),%rsi // rsi = saved original rsp.
movq $1,%rax
leaq (%rsi),%rsp // Restore rsp.
RESTORE_REGISTERS
ret
.cfi_endproc
.size MontMul4x,.-MontMul4x
// MontSqr8x: Montgomery squaring wrapper for operands whose size is a
// multiple of 8 words. Allocates the temp frame, calls MontSqr8Inner for
// the core square + reduce, then does the conditional final subtraction
// and a constant-time masked copy of the result into r.
.type MontSqr8x,@function
.align 32
MontSqr8x:
.cfi_startproc
SAVE_REGISTERS
movq %rsp,%rax
movl %r9d,%r15d
shll $3,%r9d // Calculate size * 8 bytes.
shlq $5,%r15 // size * 8 * 4
negq %r9
leaq -64(%rsp,%r9,2),%r14 // r14 = rsp[size * 2 - 8]
subq %rsi,%r14
andq $4095,%r14
movq %rsp,%rbp
cmpq %r14,%r15
jae .Loop8xCheckstk
leaq 4032(,%r9,2),%r15 // r15 = 4096 - frame - 2 * size
subq %r15,%r14
movq $0,%r15
cmovcq %r15,%r14
.Loop8xCheckstk:
subq %r14,%rbp
leaq -96(%rbp,%r9,2),%rbp // Allocate a frame + 2 x size.
andq $-64,%rbp // __chkstk implementation,
// which is invoked when the stack size needs to exceed one page.
movq %rsp,%r14
subq %rbp,%r14
andq $-4096,%r14
leaq (%r14,%rbp),%rsp
cmpq %rbp,%rsp
jbe .LoopMul8x
.align 16
.LoopPage8x:
leaq -4096(%rsp),%rsp // rsp -= 4096 each time until rsp <= the space to be allocated.
cmpq %rbp,%rsp
ja .LoopPage8x
.LoopMul8x:
movq %r9,%r15 // r15 = -size * 8
negq %r9 // Restore size.
movq %r8,32(%rsp) // Save the values of k0 and sp.
movq %rax,40(%rsp)
movq %rcx, %xmm1 // Save pointer to n.
pxor %xmm2,%xmm2 // xmm2 = 0
movq %rdi, %xmm0 // Save pointer to r.
movq %r15, %xmm5 // Save size.
call MontSqr8Inner
leaq (%rdi,%r9),%rbx // rbx = t[size]
movq %r9,%rcx // rcx = -size
movq %r9,%rdx // rdx = -size
movq %xmm0, %rdi // rdi = r
sarq $5,%rcx // rcx >>= 5 (loop count: 4 words per iteration)
.align 32
/* T -= N */
.LoopSub8x:
movq (%rbx),%r13 // r13 = t[i]
movq 8(%rbx),%r12 // r12 = t[i + 1]
movq 16(%rbx),%r11 // r11 = t[i + 2]
movq 24(%rbx),%r10 // r10 = t[i + 3]
sbbq (%rbp),%r13 // r13 = t[i] - (n[i] + CF)
sbbq 8(%rbp),%r12 // r12 = t[i + 1] - (n[i + 1] + CF)
sbbq 16(%rbp),%r11 // r11 = t[i + 2] - (n[i + 2] + CF)
sbbq 24(%rbp),%r10 // r10 = t[i + 3] - (n[i + 3] + CF)
movq %r13,0(%rdi) // Assigning value to r.
movq %r12,8(%rdi)
movq %r11,16(%rdi)
movq %r10,24(%rdi)
leaq 32(%rbp),%rbp // n += 4
leaq 32(%rdi),%rdi // r += 4
leaq 32(%rbx),%rbx // t += 4
incq %rcx
jnz .LoopSub8x
sbbq $0,%rax // rax -= CF (rax becomes the select mask: 0 or -1)
leaq (%rbx,%r9),%rbx
leaq (%rdi,%r9),%rdi
movq %rax,%xmm0
pxor %xmm2,%xmm2
pshufd $0,%xmm0,%xmm0
movq 40(%rsp),%rsi // rsi = saved original rsp.
.align 32
.LoopCopy8x:
movdqa 0(%rbx),%xmm1 // Constant-time masked copy of the result to r; zeroes t.
movdqa 16(%rbx),%xmm5
leaq 32(%rbx),%rbx
movdqu 0(%rdi),%xmm3
movdqu 16(%rdi),%xmm4
leaq 32(%rdi),%rdi
movdqa %xmm2,-32(%rbx)
movdqa %xmm2,-16(%rbx)
movdqa %xmm2,-32(%rbx,%rdx)
movdqa %xmm2,-16(%rbx,%rdx)
pcmpeqd %xmm0,%xmm2
pand %xmm0,%xmm1
pand %xmm0,%xmm5
pand %xmm2,%xmm3
pand %xmm2,%xmm4
pxor %xmm2,%xmm2
por %xmm1,%xmm3
por %xmm5,%xmm4
movdqu %xmm3,-32(%rdi)
movdqu %xmm4,-16(%rdi)
addq $32,%r9
jnz .LoopCopy8x
movq $1,%rax
leaq (%rsi),%rsp // Restore rsp.
RESTORE_REGISTERS // Restore non-volatile register.
ret
.cfi_endproc
.size MontSqr8x,.-MontSqr8x
.type MontSqr8Inner,@function
.align 32
MontSqr8Inner:
.cfi_startproc
movq %rsi, %r8
addq %r9, %r8
movq %r8, 64(%rsp) // save a[size]
movq %r9, 56(%rsp) // save size * 8
leaq 88(%rsp), %rbp // tmp的首地址
leaq 88(%rsp,%r9,2),%rbx
movq %rbx,16(%rsp) // t[size * 2]
leaq (%rcx,%r9),%rax
movq %rax,8(%rsp) // n[size]
jmp .MontSqr8xBegin
.MontSqr8xInitStack:
movdqa %xmm2,0*8(%rbp)
movdqa %xmm2,2*8(%rbp)
movdqa %xmm2,4*8(%rbp)
movdqa %xmm2,6*8(%rbp)
.MontSqr8xBegin:
movdqa %xmm2,8*8(%rbp)
movdqa %xmm2,10*8(%rbp)
movdqa %xmm2,12*8(%rbp)
movdqa %xmm2,14*8(%rbp)
lea 128(%rbp), %rbp
subq $64, %r9
jnz .MontSqr8xInitStack
xorq %rbx, %rbx // clear CF OF
movq $0, %r13
movq $0, %r12
movq $0, %r11
movq $0, %rdi
movq $0, %r15
movq $0, %rcx
leaq 88(%rsp), %rbp // set tmp[0]
movq 0(%rsi), %rdx // rdx = a[0]
movq $0, %r10
.LoopOuterSqr8x:
// begin a[0] * a[1~7]
mulx 8(%rsi), %rax, %r14 // rax = lo(a[1] * a[0]), r14 = hi(a[1] * a[0])
adcx %rbx, %rax
movq %rax, 8(%rbp)
adox %r13, %r14
mulx 16(%rsi), %rax, %r13 // (rax, r13) = a[2] * a[0]
adcx %rax, %r14 // r14 = hi(a[1] * a[0]) + lo(a[2] * a[0])
adox %r12, %r13
mulx 24(%rsi), %rax, %r12 // (rax, r12) = a[3] * a[0]
movq %r14, 16(%rbp)
adcx %rax, %r13 // r13 = hi(a[2] * a[0]) + lo(a[3] * a[0])
adox %r11, %r12
mulx 32(%rsi), %rax, %r11 // (rax, r11) = a[4] * a[0]
adcx %rax, %r12 // r12 = hi(a[3] * a[0]) + lo(a[4] * a[0])
adox %rdi, %r11
mulx 40(%rsi), %rax, %rdi // (rax, rdi) = a[5] * a[0]
adcx %rax, %r11 // r11 = hi(a[4] * a[0]) + lo(a[5] * a[0])
adox %r15, %rdi
mulx 48(%rsi), %rax, %r8 // (rax, r8) = a[6] * a[0]
adcx %rax, %rdi // rdi = hi(a[5] * a[0]) + lo(a[6] * a[0])
adox %rcx, %r8
mulx 56(%rsi), %rax, %rbx // (rax, rbx) = a[7] * a[0]
adcx %rax, %r8 // r8 = hi(a[6] * a[0]) + lo(a[7] * a[0])
adox %r10, %rbx // rbx += CF
adcq 64(%rbp), %rbx // rbx += CF
sbbq %r9, %r9 // get high CF
xorq %r10, %r10 // clear CF OF
// begin a[1] * a[2~7]
movq 8(%rsi), %rdx // rdx = a[1]
mulx 16(%rsi), %rax, %rcx // rax = lo(a[2] * a[1]), rcx = hi(a[2] * a[1])
adcx %rax, %r13 // r13 = hi(a[2] * a[0]) + lo(a[3] * a[0]) + lo(a[2] * a[1])
mulx 24(%rsi), %rax, %r14 // rax = lo(a[3] * a[1]), r14 = hi(a[3] * a[1])
movq %r13, 24(%rbp)
adox %rax, %rcx // rcx = lo(a[3] * a[1]) + hi(a[2] * a[1])
mulx 32(%rsi), %rax, %r13 // (rax, r13) = a[4] * a[1]
adcx %r12, %rcx // rcx = hi(a[3] * a[0]) + lo(a[4] * a[0]) + lo(a[3] * a[1]) + hi(a[2] * a[1])
adox %rax, %r14 // r14 = lo(a[4] * a[1]) + hi(a[3] * a[1])
mulx 40(%rsi), %rax, %r12 // (rax, r12) = a[5] * a[1]
movq %rcx, 32(%rbp)
adcx %r11, %r14 // r14 = lo(a[4] * a[1]) + hi(a[3] * a[1]) + hi(a[4] * a[0]) + lo(a[5] * a[0])
adox %rax, %r13 // r13 = lo(a[5] * a[1]) + hi(a[4] * a[1])
mulx 48(%rsi), %rax, %r11 // (rax, r11) = a[6] * a[1]
adcx %rdi, %r13 // r13 = lo(a[5] * a[1]) + hi(a[4] * a[1]) + hi(a[5] * a[0]) + lo(a[6] * a[0])
adox %rax, %r12 // r12 = hi(a[5] * a[1]) + lo(a[6] * a[1])
mulx 56(%rsi), %rax, %rdi // (rax, rdi) = a[7] * a[1]
adcx %r8, %r12 // r12 = hi(a[5] * a[1]) + lo(a[6] * a[1]) + hi(a[6] * a[0]) + lo(a[7] * a[0])
adox %rax, %r11 // r11 = hi(a[6] * a[1]) + lo(a[7] * a[1])
adcx %rbx, %r11 // r11 = hi(a[6] * a[1]) + lo(a[7] * a[1]) + hi(a[7] * a[0])
adcx %r10, %rdi // rdi += CF
adox %r10, %rdi // rdi += OF
movq 16(%rsi), %rdx // rdx = a[2]
// begin a[2] * a[3~7]
mulx 24(%rsi), %rax, %rbx // rax = lo(a[2] * a[3]), rbx = hi(a[2] * a[3])
adcx %rax, %r14 // r14 = lo(a[4] * a[1]) + hi(a[3] * a[1]) + hi(a[4] * a[0]) + lo(a[5] * a[0])
// + lo(a[2] * a[3])
mulx 32(%rsi), %rax, %rcx // rax = lo(a[2] * a[4]), rcx = hi(a[2] * a[4])
movq %r14, 40(%rbp)
adox %rax, %rbx // r13 = lo(a[2] * a[4]) + hi(a[2] * a[3])
mulx 40(%rsi), %rax, %r8 // rax = lo(a[2] * a[5]), rcx = hi(a[2] * a[5])
adcx %r13, %rbx // rbx = lo(a[2] * a[4]) + hi(a[2] * a[3])
// + lo(a[5] * a[1]) + hi(a[4] * a[1]) + hi(a[5] * a[0]) + lo(a[6] * a[0])
adox %rax, %rcx // rcx = hi(a[2] * a[4]) + lo(a[2] * a[5])
movq %rbx, 48(%rbp)
mulx 48(%rsi), %rax, %r13 // rax = lo(a[2] * a[6]), r13 = hi(a[2] * a[6])
adcx %r12, %rcx // rcx = hi(a[5] * a[1]) + lo(a[6] * a[1]) + hi(a[6] * a[0])
// + lo(a[7] * a[0]) + hi(a[2] * a[4]) + lo(a[2] * a[5])
adox %rax, %r8 // r8 = hi(a[2] * a[5]) + lo(a[2] * a[6])
mulx 56(%rsi), %rax, %r12 // rax = lo(a[2] * a[7]), r12 = hi(a[2] * a[7])
adcx %r11, %r8 // r8 = hi(a[2] * a[5]) + lo(a[2] * a[6])
// + hi(a[6] * a[1]) + lo(a[7] * a[1]) + hi(a[7] * a[0])
adox %rax, %r13 // r13 = hi(a[2] * a[6]) + lo(a[2] * a[7])
adcx %rdi, %r13 // r13 = hi(a[2] * a[6]) + lo(a[2] * a[7]) + hi(a[7] * a[1])
adcx %r10, %r12 // r12 += CF
adox %r10, %r12 // r12 += OF
movq 24(%rsi), %rdx // rdx = a[3]
// begin a[3] * a[4~7]
mulx 32(%rsi), %rax, %r14 // rax = lo(a[3] * a[4]), r14 = hi(a[3] * a[4])
adcx %rax, %rcx // rcx = hi(a[5] * a[1]) + lo(a[6] * a[1]) + hi(a[6] * a[0])
// + lo(a[7] * a[0]) + hi(a[2] * a[4]) + lo(a[2] * a[5]) + lo(a[3] * a[4])
mulx 40(%rsi), %rax, %rbx // rax = lo(a[3] * a[5]), rbx = hi(a[3] * a[5])
adox %rax, %r14 // r14 = hi(a[3] * a[4]) + lo(a[3] * a[5])
mulx 48(%rsi), %rax, %r11 // rax = lo(a[3] * a[6]), r11 = hi(a[3] * a[6])
adcx %r8, %r14 // r14 = hi(a[3] * a[4]) + lo(a[3] * a[5])+ hi(a[2] * a[5]) + lo(a[2] * a[6])
// + hi(a[6] * a[1]) + lo(a[7] * a[1]) + hi(a[7] * a[0])
adox %rax, %rbx // rbx = hi(a[3] * a[5]) + lo(a[3] * a[6])
mulx 56(%rsi), %rax, %rdi // rax = lo(a[3] * a[7]), rdi = hi(a[3] * a[7])
adcx %r13, %rbx // rbx = hi(a[3] * a[5]) + lo(a[3] * a[6])
// + hi(a[2] * a[6]) + lo(a[2] * a[7]) + hi(a[7] * a[1])
adox %rax, %r11 // r11 = hi(a[3] * a[6]) + lo(a[3] * a[7])
adcx %r12, %r11 // r11 = hi(a[2] * a[7]) + hi(a[3] * a[6]) + lo(a[3] * a[7])
adcx %r10, %rdi // rdi += CF
adox %r10, %rdi // rdi += OF
movq %rcx, 56(%rbp)
movq %r14, 64(%rbp)
movq 32(%rsi), %rdx // rdx = a[4]
// begin a[4] * a[5~7]
mulx 40(%rsi), %rax, %r13 // rax = lo(a[4] * a[5]), r13 = hi(a[4] * a[5])
adcx %rax, %rbx // rbx = hi(a[3] * a[5]) + lo(a[3] * a[6])
// + hi(a[2] * a[6]) + lo(a[2] * a[7]) + hi(a[7] * a[1]) + lo(a[4] * a[5])
mulx 48(%rsi), %rax, %r12 // rax = lo(a[4] * a[6]), r12 = hi(a[4] * a[6])
adox %rax, %r13 // r13 = lo(a[4] * a[6]) + hi(a[4] * a[5])
mulx 56(%rsi), %rax, %r14 // rax = lo(a[4] * a[7]), r14 = hi(a[4] * a[7])
adcx %r11, %r13 // r13 = hi(a[4] * a[5]) + hi(a[2] * a[7]) + hi(a[3] * a[6])
// + lo(a[3] * a[7])
adox %rax, %r12 // r12 = hi(a[4] * a[6]) + lo(a[4] * a[7])
adcx %rdi, %r12 // r12 = hi(a[4] * a[6]) + lo(a[4] * a[7]) + hi(a[3] * a[7])
adcx %r10, %r14 // r14 += CF
adox %r10, %r14 // r14 += OF
movq 40(%rsi), %rdx // rdx = a[5]
// begin a[5] * a[6~7]
mulx 48(%rsi), %rax, %r11 // rax = lo(a[5] * a[6]), r11 = hi(a[5] * a[6])
adcx %rax, %r12 // r14 = hi(a[4] * a[6]) + lo(a[4] * a[7]) + hi(a[3] * a[7]) + lo(a[5] * a[6])
mulx 56(%rsi), %rax, %rdi // rax = lo(a[5] * a[7]), rdi = hi(a[5] * a[7])
adox %rax, %r11 // r11 = hi(a[5] * a[6]) + lo(a[5] * a[7])
adcx %r14, %r11 // r11 = hi(a[5] * a[6]) + lo(a[5] * a[7]) + hi(a[4] * a[7])
adcx %r10, %rdi // rdi += CF
adox %r10, %rdi // rdi += OF
movq 48(%rsi), %rdx // rdx = a[6]
mulx 56(%rsi), %rax, %r15 // rax = lo(a[7] * a[6]), r15 = hi(a[7] * a[6])
adcx %rax, %rdi // rdi = hi(a[5] * a[6]) + lo(a[7] * a[6])
adcx %r10, %r15 // r15 += CF
leaq 64(%rsi), %rsi
cmp 64(%rsp), %rsi // cmpared with a[size]
je .Lsqrx8xEnd
neg %r9
movq $0, %rcx
movq 64(%rbp),%r14
adcx 9*8(%rbp),%rbx
adcx 10*8(%rbp),%r13
adcx 11*8(%rbp),%r12
adcx 12*8(%rbp),%r11
adcx 13*8(%rbp),%rdi
adcx 14*8(%rbp),%r15
adcx 15*8(%rbp),%rcx
leaq (%rsi), %r10 // r10 = a[8]
leaq 128(%rbp), %rbp
sbbq %rax,%rax
movq %rax, 72(%rsp)
movq %rbp, 80(%rsp)
xor %eax, %eax
movq -64(%rsi), %rdx
movq $-8, %r9
.align 32
.LoopSqr8x:
movq %r14,%r8
// begin a[0] * a[8~11]
mulx 0(%r10), %rax, %r14 // rax = lo(a[8] * a[0]), r14 = hi(a[8] * a[0])
adcx %rax, %r8
adox %rbx, %r14
mulx 8(%r10), %rax, %rbx // rax = lo(a[9] * a[0]), rbx = hi(a[8] * a[0])
adcx %rax, %r14
adox %r13, %rbx
movq %r8,(%rbp,%r9,8)
mulx 16(%r10), %rax, %r13 // rax = lo(a[10] * a[0]), r13 = hi(a[10] * a[0])
adcx %rax, %rbx
adox %r12, %r13
mulx 24(%r10), %rax, %r12 // rax = lo(a[11] * a[0]), r12 = hi(a[11] * a[0])
adcx %rax, %r13
adox %r11, %r12
movq $0, %r8
mulx 32(%r10), %rax, %r11 // rax = lo(a[12] * a[0]), r11 = hi(a[12] * a[0])
adcx %rax, %r12
adox %rdi, %r11
mulx 40(%r10), %rax, %rdi // rax = lo(a[13] * a[0]), rdi = hi(a[13] * a[0])
adcx %rax, %r11
adox %r15, %rdi
mulx 48(%r10), %rax, %r15 // rax = lo(a[14] * a[0]), r15 = hi(a[14] * a[0])
adcx %rax, %rdi
adox %rcx, %r15
mulx 56(%r10), %rax, %rcx // rax = lo(a[15] * a[0]), rcx = hi(a[15] * a[0])
adcx %rax, %r15
adcx %r8, %rcx // here r8 = 0
adox %r8, %rcx
movq 8(%rsi,%r9,8),%rdx
inc %r9
jnz .LoopSqr8x
leaq 64(%r10), %r10
movq $-8, %r9
cmp 64(%rsp), %r10 // cmpared with a[size]
je .LoopSqr8xBreak
subq 72(%rsp), %r8 // read the CF of the previous round.
movq -64(%rsi), %rdx
adcx 0*8(%rbp),%r14
adcx 1*8(%rbp),%rbx
adcx 2*8(%rbp),%r13
adcx 3*8(%rbp),%r12
adcx 4*8(%rbp),%r11
adcx 5*8(%rbp),%rdi
adcx 6*8(%rbp),%r15
adcx 7*8(%rbp),%rcx
leaq 8*8(%rbp),%rbp
sbbq %rax, %rax
xorq %r8, %r8
movq %rax, 72(%rsp)
jmp .LoopSqr8x
.align 32
.LoopSqr8xBreak:
xorq %r10, %r10
subq 72(%rsp),%r8
adcx %r10, %r14
movq 0(%rsi),%rdx
movq %r14,0(%rbp)
movq 80(%rsp), %r8
adcx %r10,%rbx
adcx %r10,%r13
adcx %r10,%r12
adcx %r10,%r11
adcx %r10,%rdi
adcx %r10,%r15
adcx %r10,%rcx
cmp %r8, %rbp
je .LoopOuterSqr8x
// if tmp does not go to the end. The current value needs to be stored in tmp and updated.
movq %rbx,1*8(%rbp)
movq 1*8(%r8),%rbx
movq %r13,2*8(%rbp)
movq 2*8(%r8),%r13
movq %r12,3*8(%rbp)
movq 3*8(%r8),%r12
movq %r11,4*8(%rbp)
movq 4*8(%r8),%r11
movq %rdi,5*8(%rbp)
movq 5*8(%r8),%rdi
movq %r15,6*8(%rbp)
movq 6*8(%r8),%r15
movq %rcx,7*8(%rbp)
movq 7*8(%r8),%rcx
movq %r8,%rbp
jmp .LoopOuterSqr8x
.align 32
.Lsqrx8xEnd:
mov %rbx,9*8(%rbp)
mov %r13,10*8(%rbp)
mov %r12,11*8(%rbp)
mov %r11,12*8(%rbp)
mov %rdi,13*8(%rbp)
mov %r15,14*8(%rbp)
leaq 88(%rsp), %rbp // tmp[0]
movq 56(%rsp), %rcx // rcx = size * 8
sbbq %rcx, %rsi // get a[0]
xorq %r15, %r15 // clear CF OF, r15 = tmp[0] = 0
movq 8(%rbp), %r14 // r14 = tmp[1]
movq 16(%rbp), %r13 // r13 = tmp[2]
movq 24(%rbp), %r12 // r12 = tmp[3]
adox %r14, %r14 // r14 = 2 * tmp[1]
movq 0(%rsi), %rdx
.align 32
.LoopShiftAddSqr4x:
mulx %rdx, %rax, %rbx // (rbx, rax) = a[0] * a[0]
adox %r13, %r13 // r13 = 2 * tmp[1]
adox %r12, %r12 // r12 = 2 * tmp[3]
adcx %rax, %r15 // r15 = 2 * tmp[0] + lo(a[0] * a[0])
adcx %rbx, %r14 // r14 = 2 * tmp[1] + hi(a[0] * a[0])
movq %r15, (%rbp)
movq %r14, 8(%rbp)
movq 8(%rsi), %rdx
mulx %rdx, %rax, %rbx // (rbx, rax) = a[1] * a[1]
adcx %rax, %r13 // r13 = 2 * tmp[2] + lo(a[1] * a[1])
adcx %rbx, %r12 // r12 = 2 * tmp[3] + hi(a[1] * a[1])
movq %r13, 16(%rbp)
movq %r12, 24(%rbp)
movq 32(%rbp), %r15 // r15 = tmp[4]
movq 40(%rbp), %r14 // r14 = tmp[5]
movq 48(%rbp), %r13 // r13 = tmp[6]
movq 56(%rbp), %r12 // r12 = tmp[7]
movq 16(%rsi), %rdx
mulx %rdx, %rax, %rbx // (rbx, rax) = a[2] * a[2]
adox %r15, %r15 // r15 = 2 * tmp[4]
adcx %rax, %r15 // r15 = 2 * tmp[4] + lo(a[2] * a[2])
adox %r14, %r14 // r14 = 2 * tmp[4]
adcx %rbx, %r14 // r14 = 2 * tmp[5] + hi(a[2] * a[2])
movq %r15, 32(%rbp)
movq %r14, 40(%rbp)
movq 24(%rsi), %rdx
mulx %rdx, %rax, %rbx // (rbx, rax) = a[3] * a[3]
adox %r13, %r13 // r13 = 2 * tmp[5]
adcx %rax, %r13 // r13 = 2 * tmp[5] + lo(a[3] * a[3])
adox %r12, %r12 // r12 = 2 * tmp[5]
adcx %rbx, %r12 // rbx = 2 * tmp[5] + hi(a[3] * a[3])
movq %r13, 48(%rbp)
movq %r12, 56(%rbp)
leaq 32(%rsi), %rsi // a[4]
leaq -32(%rcx),%rcx
jrcxz .LoopReduceSqr8xBegin // if i != 0
movq 64(%rbp), %r15 // r15 = tmp[8]
movq 72(%rbp), %r14 // r14 = tmp[9]
adox %r15, %r15 // r15 = 2 * tmp[8]
adox %r14, %r14 // r14 = 2 * tmp[9]
movq 80(%rbp), %r13 // r13 = tmp[8]
movq 88(%rbp), %r12 // r12 = tmp[9]
leaq 64(%rbp), %rbp
movq 0(%rsi), %rdx
jmp .LoopShiftAddSqr4x // if i != 0
.LoopReduceSqr8xBegin:
xorq %rax,%rax // rax = 0
leaq 88(%rsp), %rdi // tmp[0]
movq $0, %r9 // Save size.
movq %xmm1, %rbp // get n[0]
xorq %rsi, %rsi // rsi = 0
.align 32
.LoopReduceSqr8x:
movq %rax,80(%rsp) // Store the highest carry bit.
leaq (%rdi,%r9),%rdi // rdi = t[0]
movq (%rdi),%rdx // rdx = t[0]
movq 8(%rdi),%r9 // r9 = t[1]
movq 16(%rdi),%r15 // r15 = t[2]
movq 24(%rdi),%r14 // r14 = t[3]
movq 32(%rdi),%r13 // r13 = t[4]
movq 40(%rdi),%r12 // r12 = t[5]
movq 48(%rdi),%r11 // r11 = t[6]
movq 56(%rdi),%r10 // r10 = t[7]
leaq 64(%rdi),%rdi // rdi = t[8]
movq %rdx,%r8 // r8 = t[0]
imulq 40(%rsp),%rdx // rbx = k0 * t[0]
xorq %rbx,%rbx // clear CF OF
movl $8,%ecx
.align 32
.LoopReduce8x:
movq %r8, %rbx
movq %rdx, 80(%rsp,%rcx,8)
mulx (%rbp), %rax, %r8 // (r8, rax) = m' * n[0]
adcx %rbx, %rax
adox %r9, %r8 // r9 = hi(m' * n[]) + t[1]
mulx 8(%rbp), %rax, %r9 // (rdx, r9) = m' * n[0]
adcx %rax,%r8 // r9 = t[1] + lo(m' * n[1])
adox %r9, %r15 // r15 = hi(m' * n[1]) + t[2]
mulx 16(%rbp), %r9, %rax // (r9, rax) = m' * n[2]
adcx %r15, %r9 // r9 = hi(m' * n[1]) + lo(m' * n[2]) + t[2]
adox %rax, %r14 // rbx = hi(m' * n[2]) + t[3]
mulx 24(%rbp), %r15, %rax // (r15, rax) = m' * n[3]
adcx %r14,%r15 // r15 = hi(m' * n[2]) + lo(m' * n[3]) + t[3]
adox %rax,%r13 // r13 = hi(m' * n[3]) + t[4]
mulx 32(%rbp), %r14, %rax // (r14, rax) = m' * n[4]
adcx %r13,%r14 // r14 = hi(m' * n[3]) + lo(m' * n[4]) + t[4]
adox %rax,%r12 // r12 = hi(m' * n[4]) + t[5]
mulx 40(%rbp), %r13, %rax // (r13, rax) = m' * n[5]
adcx %r12,%r13 // r13 = hi(m' * n[4]) + lo(m' * n[5]) + t[5]
adox %rax,%r11 // r12 = hi(m' * n[5]) + t[6]
mulx 48(%rbp), %r12, %rax // (r12, rax) = m' * n[6]
adcx %r11,%r12 // r13 = hi(m' * n[5]) + lo(m' * n[6]) + t[6]
adox %r10,%rax // r12 = hi(m' * n[5]) + t[7]
mulx 56(%rbp), %r11, %r10 // (r11, r10) = m' * n[7]
adcx %rax,%r11 // r13 = hi(m' * n[6]) + lo(m' * n[7]) + t[7]
adcx %rsi,%r10 // r12 = hi(m' * n[7]) + t[8]
adox %rsi,%r10 // r12 = hi(m' * n[7]) + t[8]
movq %r8, %rdx
mulx 40(%rsp), %rdx, %rax // (rdx, rax) = m' * n[7]
decl %ecx // ecx--
jnz .LoopReduce8x // if ecx != 0
leaq 64(%rbp),%rbp // rbp += 64, n Pointer Offset.
xorq %rax,%rax // rax = 0
cmpq 8(%rsp),%rbp // rbp = n[size]
jae .LoopEndCondMul8x
addq (%rdi),%r8 // r8 += t[0]
adcq 8(%rdi),%r9 // r9 += t[1]
adcq 16(%rdi),%r15 // r15 += t[2]
adcq 24(%rdi),%r14 // r14 += t[3]
adcq 32(%rdi),%r13 // r13 += t[4]
adcq 40(%rdi),%r12 // r12 += t[5]
adcq 48(%rdi),%r11 // r11 += t[6]
adcq 56(%rdi),%r10 // r10 += t[7]
sbbq %rsi,%rsi // rsi = -CF
movq 144(%rsp),%rdx // rbx = m', 80 + 64
movl $8,%ecx
xor %eax,%eax
.align 32
.LoopLastSqr8x:
mulx (%rbp), %rax, %rbx // (rbx, rax) = m' * n[0]
adcx %rax,%r8 // r8 = lo(m' * n[0]) + t[0]
movq %r8,(%rdi) // t[0] = r8
leaq 8(%rdi),%rdi // t++
adox %rbx,%r9 // r9 = hi(m' * n[]) + t[2]
mulx 8(%rbp), %r8, %rbx // (r8, rbx) = m' * n[0]
adcx %r9,%r8 // r9 = t[1] + lo(m' * n[1])
adox %rbx, %r15 // r15 = hi(m' * n[1]) + t[2]
mulx 16(%rbp), %r9, %rbx // (r9, rbx) = m' * n[2]
adcx %r15, %r9 // r9 = hi(m' * n[1]) + lo(m' * n[2]) + t[2]
adox %rbx, %r14 // r14 = hi(m' * n[2]) + t[3]
mulx 24(%rbp), %r15, %rbx // (r15, rbx) = m' * n[3]
adcx %r14,%r15 // r15 = hi(m' * n[2]) + lo(m' * n[3]) + t[3]
adox %rbx,%r13 // r13 = hi(m' * n[3]) + t[4]
mulx 32(%rbp), %r14, %rbx // (r14, rbx) = m' * n[4]
adcx %r13,%r14 // r14 = hi(m' * n[3]) + lo(m' * n[4]) + t[4]
adox %rbx,%r12 // r12 = hi(m' * n[4]) + t[5]
mulx 40(%rbp), %r13, %rbx // (r13, rbx) = m' * n[5]
adcx %r12,%r13 // r13 = hi(m' * n[4]) + lo(m' * n[5]) + t[5]
adox %rbx,%r11 // r11 = hi(m' * n[5]) + t[6]
mulx 48(%rbp), %r12, %rbx // (r12, rbx) = m' * n[6]
adcx %r11,%r12 // r12 = hi(m' * n[5]) + lo(m' * n[6]) + t[6]
adox %r10,%rbx // rbx = hi(m' * n[5]) + t[7]
movq $0, %rax
mulx 56(%rbp), %r11, %r10 // (r11, r10) = m' * n[7]
adcx %rbx,%r11 // r11 = hi(m' * n[6]) + lo(m' * n[7]) + t[7]
adcx %rax,%r10 // r10 = hi(m' * n[7]) + t[8]
adox %rax,%r10 // r10 = hi(m' * n[7]) + t[8]
movq 72(%rsp,%rcx,8),%rdx // rbx = t[i] * k0
decl %ecx // ecx--
jnz .LoopLastSqr8x // if ecx != 0
leaq 64(%rbp),%rbp // n += 8
cmpq 8(%rsp),%rbp // Check whether rbp is at the end of the n array. If yes, exit the loop.
jae .LoopSqrBreak8x
movq 144(%rsp),%rdx // rbx = m'
negq %rsi // rsi = CF
movq (%rbp),%rax // rax = = n[0]
adcq (%rdi),%r8 // r8 = t[0]
adcq 8(%rdi),%r9 // r9 = t[1]
adcq 16(%rdi),%r15 // r15 = t[2]
adcq 24(%rdi),%r14 // r14 = t[3]
adcq 32(%rdi),%r13 // r13 = t[4]
adcq 40(%rdi),%r12 // r12 = t[5]
adcq 48(%rdi),%r11 // r11 = t[6]
adcq 56(%rdi),%r10 // r10 = t[7]
sbbq %rsi,%rsi // rsi = -CF
movl $8,%ecx // ecx = 8
xorq %rax, %rax
jmp .LoopLastSqr8x
.align 32
.LoopSqrBreak8x:
xorq %rax,%rax // rax = 0
addq 80(%rsp),%r8 // r8 += Highest carry bit.
adcq $0,%r9 // r9 += CF
adcq $0,%r15 // r15 += CF
adcq $0,%r14 // r14 += CF
adcq $0,%r13 // r13 += CF
adcq $0,%r12 // r12 += CF
adcq $0,%r11 // r11 += CF
adcq $0,%r10 // r10 += CF
adcq $0,%rax // rax += CF
negq %rsi // rsi = CF
.LoopEndCondMul8x:
adcq (%rdi),%r8 // r8 += t[0]
adcq 8(%rdi),%r9 // r9 += t[1]
adcq 16(%rdi),%r15 // r15 += t[2]
adcq 24(%rdi),%r14 // r14 += t[3]
adcq 32(%rdi),%r13 // r13 += t[4]
adcq 40(%rdi),%r12 // r12 += t[5]
adcq 48(%rdi),%r11 // r11 += t[6]
adcq 56(%rdi),%r10 // r10 += t[7]
adcq $0,%rax // rax += CF
movq -8(%rbp),%rcx // rcx = n[7]
xorq %rsi,%rsi // rsi = 0
movq %xmm1,%rbp // rbp = n
movq %r8,(%rdi) // Save the calculated result back to t[].
movq %r9,8(%rdi)
movq %xmm5,%r9
movq %r15,16(%rdi)
movq %r14,24(%rdi)
movq %r13,32(%rdi)
movq %r12,40(%rdi)
movq %r11,48(%rdi)
movq %r10,56(%rdi)
leaq 64(%rdi),%rdi // t += 8
cmpq 16(%rsp),%rdi // Cycle the entire t[].
jb .LoopReduceSqr8x
ret
.cfi_endproc
.size MontSqr8Inner,.-MontSqr8Inner
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/asm/bn_montx_x86_64.S | Unix Assembly | unknown | 53,538 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include "crypt_errno.h"
#include "bn_bincal.h"
#include "bn_asm.h"
#if defined(HITLS_CRYPTO_BN_X8664) && defined(__x86_64__)
#include "crypt_utils.h"
#endif
/**
 * Montgomery modular squaring: r = mont_reduce(r * r).
 * Dispatches to the MULX/ADX assembly kernel when the CPU supports BMI2+ADX,
 * otherwise to the generic assembly kernel; single-limb moduli use the C core.
 * @param r         in/out operand in Montgomery form
 * @param mont      Montgomery context (modulus, k0, sizes)
 * @param opt       big-number optimizer (used by the single-limb path only)
 * @param consttime constant-time requirement flag (single-limb path only)
 * @return CRYPT_SUCCESS, or an error code from MontSqrBinCore
 */
int32_t MontSqrBin(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    /* Single-limb moduli fall back to the portable C implementation. */
    if (mont->mSize <= 1) {
        return MontSqrBinCore(r, mont, opt, consttime);
    }
#if defined(HITLS_CRYPTO_BN_X8664) && defined(__x86_64__)
    /* Prefer the MULX/ADX-accelerated kernel when the CPU supports it. */
    if (IsSupportBMI2() && IsSupportADX()) {
        MontMulx_Asm(r, r, r, mont->mod, mont->k0, mont->mSize);
        return CRYPT_SUCCESS;
    }
#endif
    MontMul_Asm(r, r, r, mont->mod, mont->k0, mont->mSize);
    return CRYPT_SUCCESS;
}
/**
 * Montgomery modular multiplication: r = mont_reduce(a * b).
 * Same dispatch policy as MontSqrBin: ADX/BMI2 assembly kernel when available,
 * generic assembly otherwise, and the C core for single-limb moduli.
 * @return CRYPT_SUCCESS, or an error code from MontMulBinCore
 */
int32_t MontMulBin(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, BN_Mont *mont,
    BN_Optimizer *opt, bool consttime)
{
    /* Single-limb moduli fall back to the portable C implementation. */
    if (mont->mSize <= 1) {
        return MontMulBinCore(r, a, b, mont, opt, consttime);
    }
#if defined(HITLS_CRYPTO_BN_X8664) && defined(__x86_64__)
    /* Prefer the MULX/ADX-accelerated kernel when the CPU supports it. */
    if (IsSupportBMI2() && IsSupportADX()) {
        MontMulx_Asm(r, a, b, mont->mod, mont->k0, mont->mSize);
        return CRYPT_SUCCESS;
    }
#endif
    MontMul_Asm(r, a, b, mont->mod, mont->k0, mont->mSize);
    return CRYPT_SUCCESS;
}
/**
 * Convert r into Montgomery form: r = mont_reduce(r * R^2) = r * R mod m,
 * using the precomputed montRR from the context.
 * Dispatch policy matches MontMulBin/MontSqrBin.
 * @return CRYPT_SUCCESS, or an error code from MontEncBinCore
 */
int32_t MontEncBin(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    /* Single-limb moduli fall back to the portable C implementation. */
    if (mont->mSize <= 1) {
        return MontEncBinCore(r, mont, opt, consttime);
    }
#if defined(HITLS_CRYPTO_BN_X8664) && defined(__x86_64__)
    /* Prefer the MULX/ADX-accelerated kernel when the CPU supports it. */
    if (IsSupportBMI2() && IsSupportADX()) {
        MontMulx_Asm(r, r, mont->montRR, mont->mod, mont->k0, mont->mSize);
        return CRYPT_SUCCESS;
    }
#endif
    MontMul_Asm(r, r, mont->montRR, mont->mod, mont->k0, mont->mSize);
    return CRYPT_SUCCESS;
}
/**
 * Montgomery reduction: r = mont_reduce(x) via multiplication by 'one'
 * (i.e. r = x * R^-1 mod m). Single-limb moduli use ReduceCore; otherwise
 * the ADX/BMI2 kernel is preferred when the CPU supports it.
 * @param one  the value 1 in ordinary (non-Montgomery) representation
 * @param m    modulus limbs, mSize limbs long
 * @param m0   Montgomery constant k0 = -m^-1 mod 2^BN_UINT_BITS
 */
void Reduce(BN_UINT *r, BN_UINT *x, const BN_UINT *one, const BN_UINT *m, uint32_t mSize, BN_UINT m0)
{
    if (mSize <= 1) {
        ReduceCore(r, x, m, mSize, m0);
        return;
    }
#if defined(HITLS_CRYPTO_BN_X8664) && defined(__x86_64__)
    if (IsSupportBMI2() && IsSupportADX()) {
        MontMulx_Asm(r, x, one, m, m0, mSize);
        return;
    }
#endif
    MontMul_Asm(r, x, one, m, m0, mSize);
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/asm_bn_mont.c | C | unknown | 2,767 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_ASM_H
#define BN_ASM_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include <stdlib.h>
#include "crypt_bn.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Function description: r = reduce(a * b) mod n
* Function prototype: void MontMul_Asm(uint64_t *r, const uint64_t *a, const uint64_t *b,
* const uint64_t *n, const uint64_t k0, uint32_t size);
* Input register:
* x0: result array pointer r
* x1: source data array pointer a
* x2: source data array pointer b
* x3: source data array pointer n
* x4: k0 in the mont structure
* x5: The size of the first four arrays is 'size'.
* Modify registers: x0-x17, x19-x24
* Output register: None
* Function/Macro Call: bn_mont_sqr8x, bn_mont_mul4x
* Remarks: The four arrays must have the same length.
* If these are different, expand the length to the length of the longest array.
* In addition, the expanded part needs to be cleared to 0.
*/
void MontMul_Asm(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, const BN_UINT *n, const BN_UINT k0, size_t size);
#if defined(HITLS_CRYPTO_BN_X8664)
void MontMulx_Asm(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, const BN_UINT *n, const BN_UINT k0, size_t size);
#endif
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_asm.h | C | unknown | 1,978 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_err_internal.h"
#include "crypt_errno.h"
#include "bn_bincal.h"
#include "bn_basic.h"
/**
 * Allocate a big number with capacity for at least 'bits' bits.
 * A request of 0 bits yields a valid BN with no limb storage (data == NULL).
 * @param bits requested capacity in bits; must not exceed BN_MAX_BITS
 * @return new zero-valued BN_BigNum, or NULL on invalid size / allocation failure
 *         (an error code is pushed on every failure path)
 */
BN_BigNum *BN_Create(uint32_t bits)
{
    if (bits > BN_MAX_BITS) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_BITS_INVALID);
        return NULL;
    }
    uint32_t room = BITS_TO_BN_UNIT(bits);
    BN_BigNum *r = (BN_BigNum *)BSL_SAL_Calloc(1u, sizeof(BN_BigNum));
    if (r == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return NULL;
    }
    if (room != 0) {
        r->room = room;
        r->data = (BN_UINT *)BSL_SAL_Calloc(1u, room * sizeof(BN_UINT));
        if (r->data == NULL) {
            /* Fix: this path previously returned NULL without recording the
             * allocation failure, unlike the struct-allocation path above. */
            BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
            BSL_SAL_FREE(r);
            return NULL;
        }
    }
    return r;
}
/**
 * Release a big number, wiping its used limbs first.
 * Statically-backed BNs keep their storage; optimizer-owned BNs keep the
 * struct itself (the optimizer frees it). NULL is a no-op.
 */
void BN_Destroy(BN_BigNum *a)
{
    if (a == NULL) {
        return;
    }
    // clear sensitive information
    BSL_SAL_CleanseData((void *)(a->data), a->size * sizeof(BN_UINT));
    /* NOTE(review): exact equality is used here while BN_IsFlag below tests
     * bits — presumably a STATIC BN never carries other flags; confirm. */
    if (a->flag == CRYPT_BN_FLAG_STATIC) {
        return;
    }
    BSL_SAL_FREE(a->data);
    /* BNs owned by an optimizer pool are freed by the pool, not here. */
    if (!BN_IsFlag(a, CRYPT_BN_FLAG_OPTIMIZER)) {
        BSL_SAL_FREE(a);
    }
}
/**
 * Initialize an array of 'number' static big numbers, each backed by a
 * 'room'-limb slice of the caller-provided 'data' buffer.
 * The buffer must hold at least room * number limbs.
 */
inline void BN_Init(BN_BigNum *bn, BN_UINT *data, uint32_t room, int32_t number)
{
    uint32_t count = (uint32_t)number;
    BN_UINT *slice = data;
    for (uint32_t idx = 0; idx < count; idx++) {
        bn[idx].data = slice;
        bn[idx].room = room;
        bn[idx].flag = CRYPT_BN_FLAG_STATIC;
        slice += room; /* next BN gets the following 'room' limbs */
    }
}
#ifdef HITLS_CRYPTO_EAL_BN
/**
 * Check the internal consistency of a big number:
 * non-NULL, size within room, a non-zero top limb when size > 0,
 * and zero represented only as non-negative with size == 0.
 */
bool BnVaild(const BN_BigNum *a)
{
    if (a == NULL) {
        return false;
    }
    if (a->size == 0) {
        /* zero is valid only when it is not marked negative */
        return !a->sign;
    }
    if (a->data == NULL || a->size > a->room) {
        return false;
    }
    /* size must reflect the true length: the top limb may not be zero */
    return a->data[a->size - 1] != 0;
}
#endif
#ifdef HITLS_CRYPTO_BN_CB
/**
 * Allocate an empty callback context.
 * @return zero-initialized BN_CbCtx, or NULL on allocation failure
 *         (CRYPT_MEM_ALLOC_FAIL is pushed on failure)
 */
BN_CbCtx *BN_CbCtxCreate(void)
{
    BN_CbCtx *ctx = (BN_CbCtx *)BSL_SAL_Calloc(1u, sizeof(BN_CbCtx));
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
    }
    return ctx;
}
/**
 * Install a callback function and its user argument into the context.
 * A NULL context is silently ignored.
 */
void BN_CbCtxSet(BN_CbCtx *gencb, BN_CallBack callBack, void *arg)
{
    if (gencb != NULL) {
        gencb->cb = callBack;
        gencb->arg = arg;
    }
}
/**
 * Fetch the user argument stored in a callback context.
 * @return the stored argument, or NULL when the context is NULL
 */
void *BN_CbCtxGetArg(BN_CbCtx *callBack)
{
    return (callBack == NULL) ? NULL : callBack->arg;
}
/**
 * Invoke the registered callback with progress information.
 * Absent context or callback counts as success.
 * @return CRYPT_SUCCESS, or the callback's error code (also pushed)
 */
int32_t BN_CbCtxCall(BN_CbCtx *callBack, int32_t process, int32_t target)
{
    int32_t ret = CRYPT_SUCCESS;
    if (callBack != NULL && callBack->cb != NULL) {
        ret = callBack->cb(callBack, process, target);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
        }
    }
    return ret;
}
/** Free a callback context; NULL is a no-op. */
void BN_CbCtxDestroy(BN_CbCtx *cb)
{
    if (cb != NULL) {
        BSL_SAL_FREE(cb);
    }
}
#endif
/**
 * Set the sign of a big number.
 * Rejects marking zero as negative (zero is always non-negative).
 * @return CRYPT_SUCCESS, CRYPT_NULL_INPUT, or CRYPT_BN_NO_NEGATIVE_ZERO
 */
int32_t BN_SetSign(BN_BigNum *a, bool sign)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    /* a "negative zero" is not a representable value */
    if (sign && BN_IsZero(a)) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_NO_NEGATIVE_ZERO);
        return CRYPT_BN_NO_NEGATIVE_ZERO;
    }
    a->sign = sign;
    return CRYPT_SUCCESS;
}
/* A flag value is legal only if it is exactly one of the known flags. */
static bool IsLegalFlag(uint32_t flag)
{
    return (flag == CRYPT_BN_FLAG_CONSTTIME) ||
           (flag == CRYPT_BN_FLAG_OPTIMIZER) ||
           (flag == CRYPT_BN_FLAG_STATIC);
}
/**
 * OR a single known flag into the big number's flag set.
 * @return CRYPT_SUCCESS, CRYPT_NULL_INPUT, or CRYPT_BN_FLAG_INVALID
 *         when 'flag' is not one of the recognized flag values
 */
int32_t BN_SetFlag(BN_BigNum *a, uint32_t flag)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (IsLegalFlag(flag) == false) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_FLAG_INVALID);
        return CRYPT_BN_FLAG_INVALID;
    }
    a->flag |= flag;
    return CRYPT_SUCCESS;
}
/**
 * Copy a into r (value and sign), growing r's storage if necessary.
 * Copying a BN onto itself is a successful no-op.
 * @return CRYPT_SUCCESS, CRYPT_NULL_INPUT, or an error from BnExtend
 */
int32_t BN_Copy(BN_BigNum *r, const BN_BigNum *a)
{
    if (r == NULL || a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (r == a) {
        return CRYPT_SUCCESS; /* self-copy: nothing to do */
    }
    int32_t ret = BnExtend(r, a->size);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    r->sign = a->sign;
    BN_COPY_BYTES(r->data, r->size, a->data, a->size);
    r->size = a->size;
    return CRYPT_SUCCESS;
}
/**
 * Deep-copy a big number, preserving its allocated capacity (room).
 * @return a fresh BN_BigNum with the same value/sign, or NULL on
 *         NULL input or allocation failure
 */
BN_BigNum *BN_Dup(const BN_BigNum *a)
{
    if (a == NULL) {
        return NULL;
    }
    /* allocate with the same capacity so callers see an identical shape */
    BN_BigNum *dup = BN_Create(a->room * BN_UINT_BITS);
    if (dup == NULL) {
        return NULL;
    }
    dup->sign = a->sign;
    (void)memcpy_s(dup->data, a->size * sizeof(BN_UINT), a->data, a->size * sizeof(BN_UINT));
    dup->size = a->size;
    return dup;
}
/**
 * Test whether a big number equals zero.
 * A NULL input pushes CRYPT_NULL_INPUT and is reported as zero.
 */
bool BN_IsZero(const BN_BigNum *a)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return true;
    }
    return a->size == 0;
}
/**
 * Test whether a big number equals positive one.
 * A NULL input pushes CRYPT_NULL_INPUT and reports false.
 */
bool BN_IsOne(const BN_BigNum *a)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return false;
    }
    /* exactly one limb, value 1, and non-negative */
    return (a->size == 1) && (a->data[0] == 1) && (!a->sign);
}
/**
 * Return the sign flag of a big number.
 * A NULL input pushes CRYPT_NULL_INPUT and reports false.
 */
bool BN_IsNegative(const BN_BigNum *a)
{
    if (a != NULL) {
        return a->sign;
    }
    BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
    return false;
}
/**
 * Test whether a big number is odd (zero counts as even).
 * A NULL input pushes CRYPT_NULL_INPUT and reports false.
 */
bool BN_IsOdd(const BN_BigNum *a)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return false;
    }
    if (a->size == 0) {
        return false; /* zero is even */
    }
    return (a->data[0] & 1) != 0;
}
/**
 * Test whether any of the given flag bits are set on the big number.
 * A NULL input pushes CRYPT_NULL_INPUT and reports false.
 */
bool BN_IsFlag(const BN_BigNum *a, uint32_t flag)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return false;
    }
    return (a->flag & flag) != 0;
}
/**
 * Set a big number to zero, securely wiping the limbs it used.
 * @return CRYPT_SUCCESS or CRYPT_NULL_INPUT
 */
int32_t BN_Zeroize(BN_BigNum *a)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    /* wipe the used limbs so no sensitive value is left behind */
    BSL_SAL_CleanseData(a->data, a->size * sizeof(BN_UINT));
    a->size = 0;
    a->sign = false;
    return CRYPT_SUCCESS;
}
/**
 * Test whether a big number equals the non-negative single-limb value w.
 * A NULL input pushes CRYPT_NULL_INPUT and is compared as zero.
 */
bool BN_IsLimb(const BN_BigNum *a, const BN_UINT w)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return w == 0; /* NULL behaves like zero */
    }
    if (a->sign) {
        return false; /* w is non-negative by definition */
    }
    if (a->size == 0) {
        return w == 0;
    }
    return (a->size == 1) && (a->data[0] == w);
}
/**
 * Set r to the single-limb value w (non-negative).
 * Grows r's storage to at least one limb if needed.
 * @return CRYPT_SUCCESS, CRYPT_NULL_INPUT, or an error from BnExtend
 */
int32_t BN_SetLimb(BN_BigNum *r, BN_UINT w)
{
    if (r == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    int32_t ret = BnExtend(r, 1);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    BN_Zeroize(r); /* size becomes 0 and the sign is cleared */
    if (w != 0) {
        r->data[0] = w; /* size is 0 here, so limb 0 receives the value */
        r->size = 1;
    }
    return CRYPT_SUCCESS;
}
/**
 * Return the value of a as a single limb.
 * NULL or zero yields 0; values wider than one limb saturate to BN_MASK.
 */
BN_UINT BN_GetLimb(const BN_BigNum *a)
{
    if (a == NULL || a->size == 0) {
        return 0;
    }
    /* multi-limb values cannot be represented: saturate to all ones */
    return (a->size == 1) ? a->data[0] : BN_MASK;
}
/**
 * Read bit n (0-based from the least significant bit) of a.
 * Bits beyond the stored limbs read as 0; NULL pushes CRYPT_NULL_INPUT.
 */
bool BN_GetBit(const BN_BigNum *a, uint32_t n)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return false;
    }
    uint32_t word = n / BN_UINT_BITS; /* limb index */
    uint32_t bit = n % BN_UINT_BITS;  /* bit offset within the limb */
    if (word >= a->size) {
        return false; /* past the top limb: implicitly zero */
    }
    return ((a->data[word] >> bit) & (BN_UINT)1) != 0;
}
/**
 * Set bit n of a to 1, extending the logical size if the bit lies above it.
 * Does not grow the allocation: the bit must fit in a->room.
 * @return CRYPT_SUCCESS, CRYPT_NULL_INPUT, or CRYPT_BN_SPACE_NOT_ENOUGH
 */
int32_t BN_SetBit(BN_BigNum *a, uint32_t n)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    uint32_t word = n / BN_UINT_BITS;
    uint32_t bit = n % BN_UINT_BITS;
    if (word >= a->room) {
        /* no automatic growth here: the bit must fit the allocated room */
        BSL_ERR_PUSH_ERROR(CRYPT_BN_SPACE_NOT_ENOUGH);
        return CRYPT_BN_SPACE_NOT_ENOUGH;
    }
    a->data[word] |= ((BN_UINT)1) << bit;
    if (a->size <= word) {
        a->size = word + 1; /* the set bit becomes the new top */
    }
    return CRYPT_SUCCESS;
}
/**
 * Clear bit n of a. The bit must lie within the current logical size.
 * The size is re-normalized afterwards (clearing the top bit can shrink it).
 * @return CRYPT_SUCCESS, CRYPT_NULL_INPUT, or CRYPT_BN_SPACE_NOT_ENOUGH
 */
int32_t BN_ClrBit(BN_BigNum *a, uint32_t n)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    uint32_t word = n / BN_UINT_BITS;
    uint32_t bit = n % BN_UINT_BITS;
    if (word >= a->size) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_SPACE_NOT_ENOUGH);
        return CRYPT_BN_SPACE_NOT_ENOUGH;
    }
    a->data[word] &= ~(((BN_UINT)1) << bit);
    /* clearing a top bit may reduce the number's effective length */
    a->size = BinFixSize(a->data, a->size);
    if (a->size == 0) {
        a->sign = false; /* zero must be non-negative */
    }
    return CRYPT_SUCCESS;
}
/**
 * Truncate a to its n least significant bits (a = a mod 2^n).
 * Bit n must lie within the current logical size; the size is
 * re-normalized afterwards.
 * @return CRYPT_SUCCESS, CRYPT_NULL_INPUT, or CRYPT_BN_SPACE_NOT_ENOUGH
 */
int32_t BN_MaskBit(BN_BigNum *a, uint32_t n)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    uint32_t word = n / BN_UINT_BITS;
    uint32_t bit = n % BN_UINT_BITS;
    if (word >= a->size) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_SPACE_NOT_ENOUGH);
        return CRYPT_BN_SPACE_NOT_ENOUGH;
    }
    if (bit == 0) {
        /* cut falls exactly on a limb boundary: drop whole limbs */
        a->size = word;
    } else {
        a->data[word] &= ~(BN_MASK << bit); /* keep the low 'bit' bits */
        a->size = word + 1;
    }
    a->size = BinFixSize(a->data, a->size);
    if (a->size == 0) {
        a->sign = false; /* zero must be non-negative */
    }
    return CRYPT_SUCCESS;
}
/**
 * Return the bit length of a (0 for zero).
 * A NULL input pushes CRYPT_NULL_INPUT and reports 0.
 */
uint32_t BN_Bits(const BN_BigNum *a)
{
    if (a != NULL) {
        return BinBits(a->data, a->size);
    }
    BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
    return 0;
}
/** Return the byte length of a: its bit length rounded up to whole bytes. */
uint32_t BN_Bytes(const BN_BigNum *a)
{
    uint32_t bits = BN_Bits(a);
    return BN_BITS_TO_BYTES(bits);
}
/**
 * Ensure a can hold at least 'words' limbs, reallocating if necessary.
 * The old limbs are copied over and then securely wiped before being freed.
 * @return CRYPT_SUCCESS, CRYPT_BN_NOT_SUPPORT_EXTENSION for statically
 *         backed BNs, CRYPT_BN_BITS_TOO_MAX, or CRYPT_MEM_ALLOC_FAIL
 */
int32_t BnExtend(BN_BigNum *a, uint32_t words)
{
    if (a->room >= words) {
        return CRYPT_SUCCESS;
    }
    /* NOTE(review): exact equality on the flag field, as in BN_Destroy —
     * presumably STATIC never combines with other flags; confirm. */
    if (a->flag == CRYPT_BN_FLAG_STATIC) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_NOT_SUPPORT_EXTENSION);
        return CRYPT_BN_NOT_SUPPORT_EXTENSION;
    }
    if (words > BITS_TO_BN_UNIT(BN_MAX_BITS)) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_BITS_TOO_MAX);
        return CRYPT_BN_BITS_TOO_MAX;
    }
    BN_UINT *tmp = (BN_UINT *)BSL_SAL_Calloc(1u, words * sizeof(BN_UINT));
    if (tmp == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    if (a->size > 0) {
        /* copy the live limbs, then wipe the old buffer before freeing it
         * so no sensitive material is left in released memory */
        (void)memcpy_s(tmp, a->size * sizeof(BN_UINT), a->data, a->size * sizeof(BN_UINT));
        BSL_SAL_CleanseData(a->data, a->size * sizeof(BN_UINT));
    }
    BSL_SAL_FREE(a->data);
    a->data = tmp;
    a->room = words;
    return CRYPT_SUCCESS;
}
/**
 * Estimate the security strength in bits of a key pair.
 * ref. NIST.SP.800-57 Section 5.6.1.1.
 * @param pubLen public-key (modulus) length in bits
 * @param prvLen private-key length in bits, or -1 for IFC algorithms where
 *               only the modulus length matters
 * @return the security strength; 0 when it falls below the 80-bit floor
 */
int32_t BN_SecBits(int32_t pubLen, int32_t prvLen)
{
    /* Strength table: a modulus shorter than level[i] bits gives secbits[i]. */
    static const int32_t level[] = {1024, 2048, 3072, 7680, 15360, INT32_MAX};
    static const int32_t secbits[] = {0, 80, 112, 128, 192, 256};
    int32_t bits = 256; /* cap at the strongest listed strength */
    for (size_t i = 0; i < sizeof(level) / sizeof(level[0]); i++) {
        if (pubLen < level[i]) {
            bits = secbits[i];
            break;
        }
    }
    if (prvLen == -1) {
        /* IFC: only the modulus length constrains the strength */
        return bits;
    }
    /* FFC: the private key contributes at most prvLen / 2 bits of strength */
    int32_t half = prvLen / 2;
    if (half < bits) {
        bits = half;
    }
    /* strengths below 80 bits are not considered usable */
    return (bits < 80) ? 0 : bits;
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_basic.c | C | unknown | 11,141 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_BASIC_H
#define BN_BASIC_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "crypt_bn.h"
#ifdef __cplusplus
extern "C" {
#endif
struct BnMont {
uint32_t mSize; /* *< size of mod in BN_UINT */
BN_UINT k0; /* *< low word of (1/(r - mod[0])) mod r */
BN_UINT *mod; /* *< mod */
BN_UINT *one; /* *< store one */
BN_UINT *montRR; /* *< mont_enc(1) */
BN_UINT *b; /* *< tmpb(1) */
BN_UINT *t; /* *< tmpt(1) ^ 2 */
};
struct BnCbCtx {
void *arg; // callback parameter
BN_CallBack cb; // callback function, which is defined by the user
};
/* Return the first address in [ptr, ptr + alignment - 1] that is a multiple
 * of 'alignment' bytes. 'alignment' must be non-zero (caller-guaranteed). */
static inline BN_UINT *AlignedPointer(const void *ptr, uintptr_t alignment)
{
    uintptr_t addr = (uintptr_t)ptr + (alignment - 1);
    addr -= addr % alignment; /* round down to the alignment boundary */
    return (BN_UINT *)addr;
}
int32_t BnExtend(BN_BigNum *a, uint32_t words);
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_basic.h | C | unknown | 1,642 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include "securec.h"
#include "bn_bincal.h"
/* r = a + w, the length of r and a array is 'size'. The return value is the carry. */
BN_UINT BinInc(BN_UINT *r, const BN_UINT *a, uint32_t size, BN_UINT w)
{
    uint32_t i;
    BN_UINT carry = w;
    /* Propagate the carry upward; stop as soon as it dies out. */
    for (i = 0; i < size && carry != 0; i++) {
        ADD_AB(carry, r[i], a[i], carry);
    }
    /* Out-of-place call: copy the untouched high words of a into r. */
    if (r != a) {
        for (; i < size; i++) {
            r[i] = a[i];
        }
    }
    return carry;
}
/* r = a - w, the length of r and a array is 'n'. The return value is the borrow-digit. */
BN_UINT BinDec(BN_UINT *r, const BN_UINT *a, uint32_t n, BN_UINT w)
{
    uint32_t i;
    BN_UINT borrow = w;
    /* Propagate the borrow upward; stop as soon as it dies out. */
    for (i = 0; (i < n) && (borrow > 0); i++) {
        SUB_AB(borrow, r[i], a[i], borrow);
    }
    /* Out-of-place call: copy the untouched high words of a into r. */
    if (r != a) {
        for (; i < n; i++) {
            r[i] = a[i];
        }
    }
    return borrow;
}
/* r = a >> bits, the return value is the valid length of r after the shift.
 * The array length of a is n. The length of the r array must meet the requirements of the accepted calculation result,
 * which is guaranteed by the input parameter. Also assumes bits < n * BN_UINT_BITS — TODO confirm at call sites.
 */
uint32_t BinRshift(BN_UINT *r, const BN_UINT *a, uint32_t n, uint32_t bits)
{
    uint32_t nw = bits / BN_UINT_BITS; /* shift words */
    uint32_t nb = bits % BN_UINT_BITS; /* shift bits */
    /**
     * unsigned shift operand cannot be greater than or equal to the data bit width
     * Otherwise, undefined behavior is triggered. Hence the % below: when nb == 0, na == 0
     * and the (hi << na) term is discarded via 'mask' instead.
     */
    uint32_t na = (BN_UINT_BITS - nb) % BN_UINT_BITS;
    uint32_t rsize = n - nw;
    uint32_t i;
    BN_UINT hi;
    BN_UINT lo = a[nw];
    /* When nb == 0, discard the value of (hi << na) with the all-zero mask. */
    BN_UINT mask = ~BN_IsZeroUintConsttime(nb);
    /* Assigns values from the lower bits: each result word combines two source words. */
    for (i = nw; i < n - 1; i++) {
        hi = a[i + 1];
        r[i - nw] = (lo >> nb) | ((hi << na) & mask);
        lo = hi;
    }
    lo >>= nb;
    /* Drop the top word if the shift cleared it, shrinking the reported size. */
    if (lo != 0) {
        r[rsize - 1] = lo;
    } else {
        rsize--;
    }
    return rsize;
}
/* r = a << bits. The return value is the valid length of r after the shift.
 * The array length of a is n. The length of the r array must meet the requirements of the accepted calculation result,
 * which is guaranteed by the input parameter. Supports in-place use (r == a): the loop runs top-down.
 */
uint32_t BinLshift(BN_UINT *r, const BN_UINT *a, uint32_t n, uint32_t bits)
{
    uint32_t nw = bits / BN_UINT_BITS; /* shift words */
    uint32_t nb = bits % BN_UINT_BITS; /* shift bits */
    /**
     * unsigned shift operand cannot be greater than or equal to the data bit width
     * Otherwise, undefined behavior is triggered. When nb == 0, na == 0 and the
     * (lo >> na) term is discarded via 'mask' instead.
     */
    uint32_t na = (BN_UINT_BITS - nb) % BN_UINT_BITS;
    uint32_t rsize = n + nw;
    uint32_t i;
    BN_UINT hi = a[n - 1];
    BN_UINT lo;
    /* When nb == 0, discard the value of (hi << na) with the all-zero mask. */
    BN_UINT mask = ~BN_IsZeroUintConsttime(nb);
    lo = (hi >> na) & mask;
    /* Assign a value to the most significant bit: overflow of the old top word grows the result. */
    if (lo != 0) {
        r[rsize++] = lo;
    }
    /* Assign a value from the most significant bits (top-down keeps in-place shifts correct). */
    for (i = n - 1; i > 0; i--) {
        lo = a[i - 1];
        r[i + nw] = (hi << nb) | ((lo >> na) & mask);
        hi = lo;
    }
    r[nw] = a[0] << nb;
    /* Clear the lower bits to 0. */
    if (nw != 0) {
        (void)memset_s(r, nw * sizeof(BN_UINT), 0, nw * sizeof(BN_UINT));
    }
    return rsize;
}
/* r = a * b + r (multiply-accumulate of an array by a single word). The return value is a carry. */
BN_UINT BinMulAcc(BN_UINT *r, const BN_UINT *a, uint32_t aSize, BN_UINT b)
{
    BN_UINT c = 0;
    BN_UINT *rr = r;
    const BN_UINT *aa = a;
    uint32_t size = aSize;
#ifndef HITLS_CRYPTO_BN_SMALL_MEM
    /* 4-way unrolled main loop; disabled in small-memory builds to save code size. */
    while (size >= 4) { /* a group of 4 */
        MULADD_ABC(c, rr[0], aa[0], b); /* offset 0 */
        MULADD_ABC(c, rr[1], aa[1], b); /* offset 1 */
        MULADD_ABC(c, rr[2], aa[2], b); /* offset 2 */
        MULADD_ABC(c, rr[3], aa[3], b); /* offset 3 */
        aa += 4; /* a group of 4 */
        rr += 4; /* a group of 4 */
        size -= 4; /* a group of 4 */
    }
#endif
    /* Tail loop for the remaining (size % 4) words. */
    while (size > 0) {
        MULADD_ABC(c, rr[0], aa[0], b);
        aa++;
        rr++;
        size--;
    }
    return c;
}
/* r = a * b (schoolbook multiplication). rRoom >= aSize + bSize. The length is guaranteed by the
 * input parameter. r != a, r != b (r is cleared first, so it must not alias an operand).
 * The return value is the valid length of the result. */
uint32_t BinMul(BN_UINT *r, uint32_t rRoom, const BN_UINT *a, uint32_t aSize, const BN_UINT *b, uint32_t bSize)
{
    BN_UINT carry = 0;
    (void)memset_s(r, rRoom * sizeof(BN_UINT), 0, rRoom * sizeof(BN_UINT));
    /* Result combination of cyclic calculation data units. */
    for (uint32_t i = 0; i < bSize; i++) {
        carry = 0;
        uint32_t j = 0;
        BN_UINT t = b[i];
        for (; j < aSize; j++) {
            MULADC_AB(r[i + j], a[j], t, carry);
        }
        if (carry != 0) {
            r[i + j] = carry;
        }
    }
    /* After the loop, 'carry' is the carry of the LAST outer pass, i.e. the top word
     * r[aSize + bSize - 1]; when it is zero the result is one word shorter. */
    return aSize + bSize - (carry == 0);
}
/* r = a * a (schoolbook squaring). rRoom >= aSize * 2. The length is guaranteed by the input
 * parameter. r != a (r is cleared first, so it must not alias the operand).
 * The return value is the valid length of the result.
 * Strategy: sum each cross product a[i]*a[j] (i < j) once, double the sum with one left
 * shift, then add the diagonal squares a[i]^2. */
uint32_t BinSqr(BN_UINT *r, uint32_t rRoom, const BN_UINT *a, uint32_t aSize)
{
    uint32_t i;
    BN_UINT carry;
    (void)memset_s(r, rRoom * sizeof(BN_UINT), 0, rRoom * sizeof(BN_UINT));
    /* Calculate unequal data units (cross products i < j), similar to trapezoid. */
    for (i = 0; i < aSize - 1; i++) {
        BN_UINT t = a[i];
        uint32_t j;
        for (j = i + 1, carry = 0; j < aSize; j++) {
            MULADC_AB(r[i + j], a[j], t, carry);
        }
        r[i + j] = carry;
    }
    /* In the square, the multiplier unit is symmetrical. r = r * 2 */
    BinLshift(r, r, 2 * aSize - 1, 1);
    /* Calculate the direct squared data unit (the diagonal) and add it to the result. */
    for (i = 0, carry = 0; i < aSize; i++) {
        BN_UINT rh, rl;
        SQR_A(rh, rl, a[i]);
        ADD_ABC(carry, r[i << 1], r[i << 1], rl, carry);
        ADD_ABC(carry, r[(i << 1) + 1], r[(i << 1) + 1], rh, carry);
    }
    /* Shrink by one word when the top word is zero. */
    return aSize + aSize - (r[(aSize << 1) - 1] == 0);
}
/* Refresh the size: return the number of significant words in data[0..size-1],
 * i.e. 'size' with leading (most significant) zero words stripped. */
uint32_t BinFixSize(const BN_UINT *data, uint32_t size)
{
    uint32_t len = size;
    while (len > 0 && data[len - 1] == 0) {
        len--;
    }
    return len;
}
/* compare BN arrays. Maybe aSize != bSize; sizes are assumed normalized (no leading zeros).
 * return 0, if a == b
 * return 1, if a > b
 * return -1, if a < b
 */
int32_t BinCmp(const BN_UINT *a, uint32_t aSize, const BN_UINT *b, uint32_t bSize)
{
    /* Different word counts decide immediately. */
    if (aSize != bSize) {
        return (aSize > bSize) ? 1 : -1;
    }
    /* Same length: scan from the most significant word down. */
    for (uint32_t i = aSize; i > 0; i--) {
        BN_UINT wa = a[i - 1];
        BN_UINT wb = b[i - 1];
        if (wa != wb) {
            return (wa > wb) ? 1 : -1;
        }
    }
    return 0;
}
/* Obtain the number of significant bits of data[0..size-1]; 0 when size == 0. */
uint32_t BinBits(const BN_UINT *data, uint32_t size)
{
    /* Full words minus the leading zero bits of the top word. */
    return (size == 0) ? 0 : (size * BN_UINT_BITS - GetZeroBitsUint(data[size - 1]));
}
/**
 * Try to reduce the borrowing cost, guarantee h|l >= q * yl. If q is too large, reduce q.
 * Each time q decreases by 1, h increases by yh. y was previously offset, and the most significant bit of yh is 1.
 * Therefore (q * yl << BN_UINT_BITS) < (yh * 2), number of borrowing times <= 2.
 * Returns the (possibly decremented) trial quotient word.
 */
static BN_UINT TryDiv(BN_UINT q, BN_UINT h, BN_UINT l, BN_UINT yh, BN_UINT yl)
{
    BN_UINT rh, rl;
    MUL_AB(rh, rl, q, yl);
    /* Compare h|l >= rh|rl. Otherwise, reduce q. */
    if (rh < h || (rh == h && rl <= l)) {
        return q;
    }
    BN_UINT nq = q - 1;
    BN_UINT nh = h + yh;
    /* If carry occurs, nh|l is certainly large enough; no further judgment is required. */
    if (nh < yh) {
        return nq;
    }
    /* rh|rl - yl (one less multiple of yl, matching the decremented quotient) */
    if (rl < yl) {
        rh--;
    }
    rl -= yl;
    /* Compare nh|l >= rh|rl. Otherwise, reduce q once more (at most twice in total). */
    if (rh < nh || (rh == nh && rl <= l)) {
        return nq;
    }
    nq--;
    return nq;
}
/* Divide core operation (Knuth-style long division inner loop): for each position,
 * estimate a quotient word from the top words, multiply-subtract, and add back once
 * on the rare over-estimation. Quotient words go to q (when q != NULL); the remainder
 * is left in x.
 * NOTE(review): the loop starts at i == xSize, i.e. it reads x[xSize] and x[xSize - 2];
 * per BinDiv's contract the caller provides zero-cleared room above the dividend. */
static void BinDivCore(BN_UINT *q, uint32_t *qSize, BN_UINT *x, uint32_t xSize, const BN_UINT *y, uint32_t ySize)
{
    BN_UINT yy = y[ySize - 1]; /* Obtain the most significant word of the divisor. */
    uint32_t i;
    for (i = xSize; i >= ySize; i--) {
        BN_UINT qq;
        if (x[i] == yy) {
            /* Estimate would overflow one word; clamp to the maximum word value. */
            qq = (BN_UINT)-1;
        } else {
            BN_UINT rr;
            DIV_ND(qq, rr, x[i], x[i - 1], yy);
            if (ySize > 1) { /* If ySize is 1, do not need to try divide. */
                /* Obtain the least significant bit data, that is, make subscript - 2. */
                qq = TryDiv(qq, rr, x[i - 2], yy, y[ySize - 2]);
            }
        }
        if (qq > 0) {
            /* After the TryDiv is complete, perform the double subtraction: x -= qq * y. */
            BN_UINT extend = BinSubMul(&x[i - ySize], y, ySize, qq);
            extend = (x[i] -= extend);
            if (extend > 0) {
                /* reverse (qq was still one too large), borrowing required: add y back once */
                extend = BinAdd(&x[i - ySize], &x[i - ySize], y, ySize);
                x[i] += extend;
                qq--;
            }
            if (q != NULL && qq != 0) {
                /* update quotient (q is pre-zeroed, so zero words need no store) */
                q[i - ySize] = qq;
                *qSize = (*qSize) > (i - ySize + 1) ? (*qSize) : (i - ySize + 1);
            }
        }
    }
}
// The L-shift of the divisor does not exceed the highest BN_UINT.
/*
 * In-place left shift of a[0..aSize-1] by 'bits' bits.
 * Precondition: 0 < bits < BN_UINT_BITS (guaranteed by the caller, BinDiv, which only
 * calls this when shifts != 0; bits == 0 would make the (n >> rem) shift undefined).
 * Bits shifted out of the top word are discarded, so the caller must ensure the most
 * significant word has at least 'bits' leading zero bits.
 */
static void BnLshiftSimple(BN_UINT *a, uint32_t aSize, uint32_t bits)
{
    /* Fix: use BN_UINT_BITS, the word-width macro used everywhere else in this file
     * (was the inconsistent BN_UNIT_BITS spelling). */
    uint32_t rem = BN_UINT_BITS - bits;
    BN_UINT nextBits = 0;
    for (uint32_t i = 0; i < aSize; i++) {
        BN_UINT n = a[i];
        /* Carry the bits shifted out of the previous word into this one. */
        a[i] = (n << bits) | nextBits;
        nextBits = (n >> rem);
    }
    return;
}
/**
 * x / y = q...x (quotient in q, remainder left in x); the return value is the updated xSize.
 * q and qSize are both NULL or not NULL. Other input parameters must be valid.
 * q, x and y cannot be the same pointer, the data in q must be 0.
 * Ensure that x->room >= xSize + 2, and the extra two spaces need to be cleared. Extra space is used during try divide.
 * this interface does not ensure that the y is consistent after running (y is shifted in place).
 */
uint32_t BinDiv(BN_UINT *q, uint32_t *qSize, BN_UINT *x, uint32_t xSize, BN_UINT *y, uint32_t ySize)
{
    uint32_t shifts = GetZeroBitsUint(y[ySize - 1]);
    uint32_t xNewSize = xSize;
    /* Normalization: left shift until the top bit of the divisor's top word is set
     * (required by the trial-quotient estimate in BinDivCore). */
    if (shifts != 0) {
        BnLshiftSimple(y, ySize, shifts);
        xNewSize = BinLshift(x, x, xSize, shifts);
    }
    /* Deliberately passes the original xSize: BinDivCore reads x[xSize], which is
     * either the word the shift carried in (xNewSize == xSize + 1) or a zero-cleared
     * extra word guaranteed by the caller's room contract. */
    BinDivCore(q, qSize, x, xSize, y, ySize);
    /* shift compensation: undo the normalization on the remainder */
    if (shifts != 0) {
        xNewSize = BinRshift(x, x, xNewSize, shifts);
    }
    return BinFixSize(x, xNewSize);
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_bincal.c | C | unknown | 11,223 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_BINCAL_H
#define BN_BINCAL_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include "bn_basic.h"
#if defined(HITLS_CRYPTO_BN_X8664)
#include "bn_bincal_x8664.h"
#elif defined(HITLS_CRYPTO_BN_ARMV8)
#include "bn_bincal_armv8.h"
#else
#include "bn_bincal_noasm.h"
#endif
#ifdef __cplusplus
/* Fix: the linkage string must be "C" (uppercase); `extern "c"` is rejected by
 * C++ compilers when this header is included from C++. */
extern "C" {
#endif
/* r = a + b, output 'carry' receives the carry-out (0 or 1) */
#define ADD_AB(carry, r, a, b) \
    do { \
        BN_UINT macroTmpT = (a) + (b); \
        (carry) = macroTmpT < (a) ? 1 : 0; \
        (r) = macroTmpT; \
    } while (0)
/* r = a - b, output 'borrow' receives the borrow digit (0 or 1) */
#define SUB_AB(borrow, r, a, b) \
    do { \
        BN_UINT macroTmpT = (a) - (b); \
        (borrow) = ((a) < (b)) ? 1 : 0; \
        (r) = macroTmpT; \
    } while (0)
/* r = a - b - c, output 'borrow' receives the combined borrow digit */
#define SUB_ABC(borrow, r, a, b, c) \
    do { \
        BN_UINT macroTmpS = (a) - (b); \
        BN_UINT macroTmpB = ((a) < (b)) ? 1 : 0; \
        macroTmpB += (macroTmpS < (c)) ? 1 : 0; \
        (r) = macroTmpS - (c); \
        borrow = macroTmpB; \
    } while (0)
/* Half of the word width, used to split a BN_UINT into high/low halves. */
#define BN_UINT_HALF_BITS (BN_UINT_BITS >> 1)
/* carry value of the upper part */
#define BN_UINT_HC ((BN_UINT)1 << BN_UINT_HALF_BITS)
/* Moves the low half into the high half. */
#define BN_UINT_LO_TO_HI(t) ((t) << BN_UINT_HALF_BITS)
/* Keeps the high half in place (low half cleared). */
#define BN_UINT_HI_TO_HI(t) ((t) & ((BN_UINT)0 - BN_UINT_HC))
/* Extracts the low half. */
#define BN_UINT_LO(t) ((t) & (BN_UINT_HC - 1))
/* Moves the high half down into the low half. */
#define BN_UINT_HI(t) ((t) >> BN_UINT_HALF_BITS)
/* copy words and zero-pad the tail; ensure that dstlen >= srclen */
#define BN_COPY_BYTES(dst, dstlen, src, srclen) \
    do { \
        uint32_t macroTmpI; \
        for (macroTmpI = 0; macroTmpI < (srclen); macroTmpI++) { (dst)[macroTmpI] = (src)[macroTmpI]; } \
        for (; macroTmpI < (dstlen); macroTmpI++) { (dst)[macroTmpI] = 0; } \
    } while (0)
// Modular operation, satisfy d < (1 << BN_UINT_HALF_BITS): r = (nh | nl) % d,
// computed half-word by half-word so intermediates never overflow.
#define MOD_HALF(r, nh, nl, d) \
    do { \
        BN_UINT macroTmpD = (d); \
        (r) = (nh) % macroTmpD; \
        (r) = ((r) << BN_UINT_HALF_BITS) | BN_UINT_HI((nl)); \
        (r) = (r) % macroTmpD; \
        (r) = ((r) << BN_UINT_HALF_BITS) | BN_UINT_LO((nl)); \
        (r) = (r) % macroTmpD; \
    } while (0)
/* r = a * b + r + c, where c is refreshed as the new carry value.
 * Portable full-word multiply built from four half-word cross products. */
#define MULADD_ABC(c, r, a, b) \
    do { \
        BN_UINT macroTmpAl = BN_UINT_LO(a); \
        BN_UINT macroTmpAh = BN_UINT_HI(a); \
        BN_UINT macroTmpBl = BN_UINT_LO(b); \
        BN_UINT macroTmpBh = BN_UINT_HI(b); \
        BN_UINT macroTmpX3 = macroTmpAh * macroTmpBh; \
        BN_UINT macroTmpX2 = macroTmpAh * macroTmpBl; \
        BN_UINT macroTmpX1 = macroTmpAl * macroTmpBh; \
        BN_UINT macroTmpX0 = macroTmpAl * macroTmpBl; \
        (r) += (c); \
        (c) = ((r) < (c)) ? 1 : 0; \
        macroTmpX1 += macroTmpX2; \
        (c) += (macroTmpX1 < macroTmpX2) ? BN_UINT_HC : 0; \
        macroTmpX2 = macroTmpX0; \
        macroTmpX0 += macroTmpX1 << BN_UINT_HALF_BITS; \
        (c) += (macroTmpX0 < macroTmpX2) ? 1 : 0; \
        (c) += BN_UINT_HI(macroTmpX1); \
        (c) += macroTmpX3; \
        (r) += macroTmpX0; \
        (c) += ((r) < macroTmpX0) ? 1 : 0; \
    } while (0)
/* r = a + b + c, input 'carry' means carry. Note that a and carry cannot be the same variable. */
#define ADD_ABC(carry, r, a, b, c) \
    do { \
        BN_UINT macroTmpS = (b) + (c); \
        carry = (macroTmpS < (c)) ? 1 : 0; \
        (r) = macroTmpS + (a); \
        carry += ((r) < macroTmpS) ? 1 : 0; \
    } while (0)
/* Word-array (binary) primitives; lengths are in BN_UINT words. See bn_bincal.c. */
BN_UINT BinAdd(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n);
BN_UINT BinSub(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n);
BN_UINT BinInc(BN_UINT *r, const BN_UINT *a, uint32_t size, BN_UINT w);
BN_UINT BinDec(BN_UINT *r, const BN_UINT *a, uint32_t n, BN_UINT w);
uint32_t BinRshift(BN_UINT *r, const BN_UINT *a, uint32_t n, uint32_t bits);
/* NOTE(review): aSize is declared as BN_UINT here while every sibling API uses
 * uint32_t — confirm against the definition before unifying. */
BN_UINT BinSubMul(BN_UINT *r, const BN_UINT *a, BN_UINT aSize, BN_UINT m);
uint32_t BinLshift(BN_UINT *r, const BN_UINT *a, uint32_t n, uint32_t bits);
BN_UINT BinMulAcc(BN_UINT *r, const BN_UINT *a, uint32_t aSize, BN_UINT b);
uint32_t BinMul(BN_UINT *r, uint32_t rRoom, const BN_UINT *a, uint32_t aSize, const BN_UINT *b, uint32_t bSize);
uint32_t BinSqr(BN_UINT *r, uint32_t rRoom, const BN_UINT *a, uint32_t aSize);
uint32_t GetZeroBitsUint(BN_UINT x);
uint32_t BinFixSize(const BN_UINT *data, uint32_t size);
int32_t BinCmp(const BN_UINT *a, uint32_t aSize, const BN_UINT *b, uint32_t bSize);
uint32_t BinBits(const BN_UINT *data, uint32_t size);
uint32_t BinDiv(BN_UINT *q, uint32_t *qSize, BN_UINT *x, uint32_t xSize, BN_UINT *y, uint32_t ySize);
#ifdef HITLS_CRYPTO_BN_COMBA
uint32_t SpaceSize(uint32_t size);
// Perform a multiplication calculation of 4 blocks of data, r = a*b,
// where the length of r is 8, and the length of a is 4.
void MulComba4(BN_UINT *r, const BN_UINT *a, const BN_UINT *b);
// Calculate the square of 4 blocks of data, r = a^2, where the length of r is 8, and the length of a is 4.
void SqrComba4(BN_UINT *r, const BN_UINT *a);
// Perform a multiplication calculation of 6 blocks of data, r = a*b,
// where the length of r is 12, the length of a and b is 6.
void MulComba6(BN_UINT *r, const BN_UINT *a, const BN_UINT *b);
// Calculate the square of 6 blocks of data, r = a^2, where the length of r is 12, and the length of a is 6.
void SqrComba6(BN_UINT *r, const BN_UINT *a);
void MulConquer(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t size, BN_UINT *space, bool consttime);
void SqrConquer(BN_UINT *r, const BN_UINT *a, uint32_t size, BN_UINT *space, bool consttime);
#endif
/* Montgomery-domain helpers (implemented elsewhere in the BN module). */
int32_t MontSqrBinCore(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime);
int32_t MontMulBinCore(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, BN_Mont *mont,
    BN_Optimizer *opt, bool consttime);
int32_t MontEncBinCore(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime);
void ReduceCore(BN_UINT *r, BN_UINT *x, const BN_UINT *m, uint32_t mSize, BN_UINT m0);
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_bincal.h | C | unknown | 7,894 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_BINCAL_ARMV8_H
#define BN_BINCAL_ARMV8_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "bn_basic.h"
#ifdef __cplusplus
/* Fix: the linkage string must be "C" (uppercase); `extern "c"` is rejected by
 * C++ compilers when this header is included from C++. */
extern "C" {
#endif
// wh | wl = u * v (AArch64: mul gives the low word, umulh the high word)
#define MUL_AB(wh, wl, u, v) \
    { \
        __asm("mul %1, %2, %3 \n\t" \
              "umulh %0, %2, %3 \n\t" \
              : "=&r"(wh), "=&r"(wl) \
              : "r"(u), "r"(v) \
              : "cc"); \
    }
// wh | wl = u ^ 2
#define SQR_A(wh, wl, u) \
    { \
        __asm("mul %1, %2, %2 \n\t" \
              "umulh %0, %2, %2 \n\t" \
              : "=&r"(wh), "=&r"(wl) \
              : "r"(u) \
              : "cc"); \
    }
/* nh|nl / d = q...r. Portable two-half-word long division (AArch64 has no 128/64 divide).
 * NOTE(review): the correction steps follow Knuth Algorithm D and appear to assume the
 * divisor is normalized (top bit set), as BinDiv arranges — confirm for other callers. */
#define DIV_ND(q, r, nh, nl, d) \
    do { \
        BN_UINT macroTmpD1, macroTmpD0, macroTmpQ1, macroTmpQ0, macroTmpR1, macroTmpR0, macroTmpM; \
        \
        macroTmpD1 = BN_UINT_HI(d); \
        macroTmpD0 = BN_UINT_LO(d); \
        \
        macroTmpQ1 = (nh) / macroTmpD1; \
        macroTmpR1 = (nh) - macroTmpQ1 * macroTmpD1; \
        macroTmpM = macroTmpQ1 * macroTmpD0; \
        macroTmpR1 = (macroTmpR1 << (BN_UINT_BITS >> 1)) | BN_UINT_HI(nl); \
        if (macroTmpR1 < macroTmpM) { \
            macroTmpQ1--, macroTmpR1 += (d); \
            if (macroTmpR1 >= (d)) { \
                if (macroTmpR1 < macroTmpM) { \
                    macroTmpQ1--; \
                    macroTmpR1 += (d); \
                } \
            } \
        } \
        macroTmpR1 -= macroTmpM; \
        \
        macroTmpQ0 = macroTmpR1 / macroTmpD1; \
        macroTmpR0 = macroTmpR1 - macroTmpQ0 * macroTmpD1; \
        macroTmpM = macroTmpQ0 * macroTmpD0; \
        macroTmpR0 = (macroTmpR0 << (BN_UINT_BITS >> 1)) | BN_UINT_LO(nl); \
        if (macroTmpR0 < macroTmpM) { \
            macroTmpQ0--, macroTmpR0 += (d); \
            if (macroTmpR0 >= (d)) { \
                if (macroTmpR0 < macroTmpM) { \
                    macroTmpQ0--; \
                    macroTmpR0 += (d); \
                } \
            } \
        } \
        macroTmpR0 -= macroTmpM; \
        \
        (q) = (macroTmpQ1 << (BN_UINT_BITS >> 1)) | macroTmpQ0; \
        (r) = macroTmpR0; \
    } while (0)
// (hi, lo) = a * b
// r += lo + carry
// carry = hi + c
#define MULADC_AB(r, a, b, carry) \
    do { \
        BN_UINT hi, lo; \
        __asm("mul %0, %2, %3 \n\t" \
              "umulh %1, %2, %3 \n\t" \
              : "=&r"(lo), "=&r"(hi) \
              : "r"(a), "r"(b) \
              : "cc"); \
        __asm("adds %1, %1, %3 \n\t" \
              "adc %2, %2, xzr \n\t " \
              "adds %0, %0, %1 \n\t" \
              "adc %2, %2, xzr \n\t " \
              "mov %1, %2 \n\t" \
              : "+&r"(r), "+&r"(carry), "+&r"(hi) \
              : "r"(lo) \
              : "cc"); \
    } while (0)
/* h|m|l = h|m|l + u * v. Ensure that the value of h is not too large to avoid carry. */
#define MULADD_AB(h, m, l, u, v) \
    do { \
        BN_UINT hi, lo; \
        __asm("mul %0, %2, %3 \n\t" \
              "umulh %1, %2, %3 \n\t" \
              : "=&r"(lo), "=&r"(hi) \
              : "r"(u), "r"(v) \
              : "cc"); \
        __asm("adds %0, %0, %3 \n\t " \
              "adcs %1, %1, %4 \n\t " \
              "adc %2, %2, xzr \n\t " \
              : "+&r"(l), "+&r"(m), "+&r"(h) \
              : "r"(lo), "r"(hi) \
              : "cc"); \
    } while (0)
/* h|m|l = h|m|l + 2 * u * v. Ensure that the value of h is not too large to avoid carry. */
#define MULADD_AB2(h, m, l, u, v) \
    do { \
        BN_UINT hi, lo; \
        __asm("mul %0, %2, %3 \n\t" \
              "umulh %1, %2, %3 \n\t" \
              : "=&r"(lo), "=&r"(hi) \
              : "r"(u), "r"(v) \
              : "cc"); \
        __asm("adds %0, %0, %3 \n\t " \
              "adcs %1, %1, %4 \n\t " \
              "adc %2, %2, xzr \n\t " \
              "adds %0, %0, %3 \n\t " \
              "adcs %1, %1, %4 \n\t " \
              "adc %2, %2, xzr \n\t " \
              : "+&r"(l), "+&r"(m), "+&r"(h) \
              : "r"(lo), "r"(hi) \
              : "cc"); \
    } while (0)
/* h|m|l = h|m|l + u * u. Ensure that the value of h is not too large to avoid carry. */
#define SQRADD_A(h, m, l, u) MULADD_AB(h, m, l, u, u)
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_bincal_armv8.h | C | unknown | 7,144 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_BINCAL_NOASM_H
#define BN_BINCAL_NOASM_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include "bn_basic.h"
#ifdef __cplusplus
/* Fix: the linkage string must be "C" (uppercase); `extern "c"` is rejected by
 * C++ compilers when this header is included from C++. */
extern "C" {
#endif
/* nh|nl / d = q...r. Portable two-half-word long division.
 * NOTE(review): the correction steps follow Knuth Algorithm D and appear to assume the
 * divisor is normalized (top bit set), as BinDiv arranges — confirm for other callers. */
#define DIV_ND(q, r, nh, nl, d) \
    do { \
        BN_UINT macroTmpD1, macroTmpD0, macroTmpQ1, macroTmpQ0, macroTmpR1, macroTmpR0, macroTmpM; \
        \
        macroTmpD1 = BN_UINT_HI(d); \
        macroTmpD0 = BN_UINT_LO(d); \
        \
        macroTmpQ1 = (nh) / macroTmpD1; \
        macroTmpR1 = (nh) - macroTmpQ1 * macroTmpD1; \
        macroTmpM = macroTmpQ1 * macroTmpD0; \
        macroTmpR1 = (macroTmpR1 << (BN_UINT_BITS >> 1)) | BN_UINT_HI(nl); \
        if (macroTmpR1 < macroTmpM) { \
            macroTmpQ1--, macroTmpR1 += (d); \
            if (macroTmpR1 >= (d)) { \
                if (macroTmpR1 < macroTmpM) { \
                    macroTmpQ1--; \
                    macroTmpR1 += (d); \
                } \
            } \
        } \
        macroTmpR1 -= macroTmpM; \
        \
        macroTmpQ0 = macroTmpR1 / macroTmpD1; \
        macroTmpR0 = macroTmpR1 - macroTmpQ0 * macroTmpD1; \
        macroTmpM = macroTmpQ0 * macroTmpD0; \
        macroTmpR0 = (macroTmpR0 << (BN_UINT_BITS >> 1)) | BN_UINT_LO(nl); \
        if (macroTmpR0 < macroTmpM) { \
            macroTmpQ0--, macroTmpR0 += (d); \
            if (macroTmpR0 >= (d)) { \
                if (macroTmpR0 < macroTmpM) { \
                    macroTmpQ0--; \
                    macroTmpR0 += (d); \
                } \
            } \
        } \
        macroTmpR0 -= macroTmpM; \
        \
        (q) = (macroTmpQ1 << (BN_UINT_BITS >> 1)) | macroTmpQ0; \
        (r) = macroTmpR0; \
    } while (0)
/* wh | wl = u * v: portable full multiply from four half-word cross products */
#define MUL_AB(wh, wl, u, v) \
    do { \
        BN_UINT macroTmpUl = BN_UINT_LO(u); \
        BN_UINT macroTmpUh = BN_UINT_HI(u); \
        BN_UINT macroTmpVl = BN_UINT_LO(v); \
        BN_UINT macroTmpVh = BN_UINT_HI(v); \
        \
        BN_UINT macroTmpX0 = macroTmpUl * macroTmpVl; \
        BN_UINT macroTmpX1 = macroTmpUl * macroTmpVh; \
        BN_UINT macroTmpX2 = macroTmpUh * macroTmpVl; \
        BN_UINT macroTmpX3 = macroTmpUh * macroTmpVh; \
        \
        macroTmpX1 += BN_UINT_HI(macroTmpX0); \
        macroTmpX1 += macroTmpX2; \
        if (macroTmpX1 < macroTmpX2) { macroTmpX3 += BN_UINT_HC; } \
        \
        (wh) = macroTmpX3 + BN_UINT_HI(macroTmpX1); \
        (wl) = (macroTmpX1 << (BN_UINT_BITS >> 1)) | BN_UINT_LO(macroTmpX0); \
    } while (0)
/* wh | wl = u ^ 2 (one cross product reused and doubled) */
#define SQR_A(wh, wl, u) \
    do { \
        BN_UINT macroTmpUl = BN_UINT_LO(u); \
        BN_UINT macroTmpUh = BN_UINT_HI(u); \
        \
        BN_UINT macroTmpX0 = macroTmpUl * macroTmpUl; \
        BN_UINT macroTmpX1 = macroTmpUl * macroTmpUh; \
        BN_UINT macroTmpX2 = macroTmpUh * macroTmpUh; \
        \
        BN_UINT macroTmpT = macroTmpX1 << 1; \
        macroTmpT += BN_UINT_HI(macroTmpX0); \
        if (macroTmpT < macroTmpX1) { macroTmpX2 += BN_UINT_HC; } \
        \
        (wh) = macroTmpX2 + BN_UINT_HI(macroTmpT); \
        (wl) = (macroTmpT << (BN_UINT_BITS >> 1)) | BN_UINT_LO(macroTmpX0); \
    } while (0)
/* r = a + b + c, input 'carry' means carry. Note that a and carry cannot be the same variable. */
#define ADD_ABC(carry, r, a, b, c) \
    do { \
        BN_UINT macroTmpS = (b) + (c); \
        carry = (macroTmpS < (c)) ? 1 : 0; \
        (r) = macroTmpS + (a); \
        carry += ((r) < macroTmpS) ? 1 : 0; \
    } while (0)
// (hi, lo) = a * b
// r += lo + carry
// carry = hi + c
#define MULADC_AB(r, a, b, carry) \
    do { \
        BN_UINT hi, lo; \
        MUL_AB(hi, lo, a, b); \
        ADD_ABC(carry, r, r, lo, carry); \
        carry += hi; \
    } while (0)
/* h|m|l = h|m|l + u * v. Ensure that the value of h is not too large to avoid carry.
 * The low word is recomputed via the native full multiply (u)*(v); the half-word
 * cross products are only used to derive the carries. */
#define MULADD_AB(h, m, l, u, v) \
    do { \
        BN_UINT macroTmpUl = BN_UINT_LO(u); \
        BN_UINT macroTmpUh = BN_UINT_HI(u); \
        BN_UINT macroTmpVl = BN_UINT_LO(v); \
        BN_UINT macroTmpVh = BN_UINT_HI(v); \
        \
        BN_UINT macroTmpX3 = macroTmpUh * macroTmpVh; \
        BN_UINT macroTmpX2 = macroTmpUh * macroTmpVl; \
        BN_UINT macroTmpX1 = macroTmpUl * macroTmpVh; \
        BN_UINT macroTmpX0 = macroTmpUl * macroTmpVl; \
        macroTmpX1 += BN_UINT_HI(macroTmpX0); \
        macroTmpX0 = (u) * (v); \
        macroTmpX1 += macroTmpX2; \
        macroTmpX3 = macroTmpX3 + BN_UINT_HI(macroTmpX1); \
        \
        (l) += macroTmpX0; \
        \
        if (macroTmpX1 < macroTmpX2) { macroTmpX3 += BN_UINT_HC; } \
        macroTmpX3 += ((l) < macroTmpX0); \
        (m) += macroTmpX3; \
        (h) += ((m) < macroTmpX3); \
    } while (0)
/* h|m|l = h|m|l + 2 * u * v. Ensure that the value of h is not too large to avoid carry. */
#define MULADD_AB2(h, m, l, u, v) \
    do { \
        MULADD_AB((h), (m), (l), (u), (v)); \
        MULADD_AB((h), (m), (l), (u), (v)); \
    } while (0)
/* h|m|l = h|m|l + v * v. Ensure that the value of h is not too large to avoid carry. */
#define SQRADD_A(h, m, l, v) \
    do { \
        BN_UINT macroTmpVl = BN_UINT_LO(v); \
        BN_UINT macroTmpVh = BN_UINT_HI(v); \
        \
        BN_UINT macroTmpX3 = macroTmpVh * macroTmpVh; \
        BN_UINT macroTmpX2 = macroTmpVh * macroTmpVl; \
        BN_UINT macroTmpX1 = macroTmpX2; \
        BN_UINT macroTmpX0 = macroTmpVl * macroTmpVl; \
        macroTmpX1 += BN_UINT_HI(macroTmpX0); \
        macroTmpX0 = (v) * (v); \
        macroTmpX1 += macroTmpX2; \
        macroTmpX3 = macroTmpX3 + BN_UINT_HI(macroTmpX1); \
        \
        (l) += macroTmpX0; \
        \
        if (macroTmpX1 < macroTmpX2) { macroTmpX3 += BN_UINT_HC; } \
        if ((l) < macroTmpX0) { macroTmpX3 += 1; } \
        (m) += macroTmpX3; \
        if ((m) < macroTmpX3) { (h)++; } \
    } while (0)
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_bincal_noasm.h | C | unknown | 9,332 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_BINCAL_X8664_H
#define BN_BINCAL_X8664_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include "bn_basic.h"
#ifdef __cplusplus
/* Fix: the linkage string must be "C" (uppercase); `extern "c"` is rejected by
 * C++ compilers when this header is included from C++. */
extern "C" {
#endif
// wh | wl = u * v (x86-64: mulq produces RDX:RAX)
#define MUL_AB(wh, wl, u, v) \
    { \
        __asm("mulq %3" : "=d"(wh), "=a"(wl) : "a"(u), "r"(v) : "cc"); \
    }
// wh | wl = u ^ 2
#define SQR_A(wh, wl, u) \
    { \
        __asm("mulq %2 " : "=d"(wh), "=a"(wl) : "a"(u) : "cc"); \
    }
// nh | nl / d = q...r (divq requires nh < d, otherwise #DE — guaranteed by callers)
#define DIV_ND(q, r, nh, nl, d) \
    { \
        __asm("divq %4" : "=a"(q), "=d"(r) : "d"(nh), "a"(nl), "r"(d) : "cc"); \
    }
/* r += c
 * c = carry
 * NOTE(review): this macro looks broken/unused — it references an identifier 'l'
 * that is not a parameter, and operand %4 / %2 have no matching constraint in the
 * operand lists. Confirm it has no users before removing. */
#define ADD_CARRY(carry, r) \
    do { \
        __asm("addq %1, %0 \n\t " \
              "adcq %4, %1 \n\t " \
              "adcq $0, %2 \n\t " \
              : "+m"(l), "+r"(carry) \
              : \
              : "cc"); \
    } while (0)
/* h|m|l = h|m|l + u * v. Ensure that the value of h is not too large to avoid carry.
 * NOTE(review): "mulq %0, %1, %2" is not valid mulq syntax (mulq takes a single
 * operand); this macro appears dead — confirm before fixing or removing. */
#define MULXADD_AB(h, m, l, u, v) \
    do { \
        BN_UINT hi, lo; \
        __asm("mulq %0, %1, %2" : "=a"(lo), "=d"(hi) : "a"(u), "m"(v) : "cc"); \
        __asm("addq %3, %0 \n\t " \
              "adcq %4, %1 \n\t " \
              "adcq $0, %2 \n\t " \
              : "+r"(l), "+r"(m), "+r"(h) \
              : "r"(lo), "r"(hi) \
              : "cc"); \
    } while (0)
// (hi, lo) = a * b
// r += lo + carry
// carry = hi + c
#define MULADC_AB(r, a, b, carry) \
    do { \
        BN_UINT hi, lo; \
        __asm("mulq %3" : "=a"(lo), "=d"(hi) : "a"(a), "g"(b) : "cc"); \
        __asm("addq %3, %1 \n\t" \
              "adcq $0, %2 \n\t" \
              "addq %1, %0 \n\t" \
              "adcq $0, %2 \n\t" \
              "movq %2, %1 \n\t" \
              : "+r"(r), "+r"(carry), "+r"(hi) \
              : "r"(lo) \
              : "cc"); \
    } while (0)
/* h|m|l = h|m|l + u * v. Ensure that the value of h is not too large to avoid carry. */
#define MULADD_AB(h, m, l, u, v) \
    do { \
        BN_UINT hi, lo; \
        __asm("mulq %3" : "=a"(lo), "=d"(hi) : "a"(u), "m"(v) : "cc"); \
        __asm("addq %3, %0 \n\t " \
              "adcq %4, %1 \n\t " \
              "adcq $0, %2 \n\t " \
              : "+r"(l), "+r"(m), "+r"(h) \
              : "r"(lo), "r"(hi) \
              : "cc"); \
    } while (0)
/* h|m|l = h|m|l + 2 * u * v. Ensure that the value of h is not too large to avoid carry. */
#define MULADD_AB2(h, m, l, u, v) \
    do { \
        BN_UINT hi, lo; \
        __asm("mulq %3" : "=a"(lo), "=d"(hi) : "a"(u), "m"(v) : "cc"); \
        __asm("addq %3, %0 \n\t " \
              "adcq %4, %1 \n\t " \
              "adcq $0, %2 \n\t " \
              "addq %3, %0 \n\t " \
              "adcq %4, %1 \n\t " \
              "adcq $0, %2 \n\t " \
              : "+r"(l), "+r"(m), "+r"(h) \
              : "r"(lo), "r"(hi) \
              : "cc"); \
    } while (0)
/* h|m|l = h|m|l + u * u. Ensure that the value of h is not too large to avoid carry. */
#define SQRADD_A(h, m, l, u) MULADD_AB(h, m, l, u, u)
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_bincal_x8664.h | C | unknown | 5,705 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_BN) && defined(HITLS_CRYPTO_BN_COMBA)
#include <stdint.h>
#include "bn_bincal.h"
/* Comba squaring, columns 0..3. The three accumulator words (h, m, l) rotate roles
 * each column; the name bound to the finished column is stored and zeroed. */
#define SQR_COMBA_BEGIN_1(r, a, h, m, l) do { \
        SQRADD_A((h), (m), (l), (a)[0]); \
        (r)[0] = (l); \
        (l) = 0; \
        MULADD_AB2((l), (h), (m), (a)[0], (a)[1]); \
        (r)[1] = (m); \
        (m) = 0; \
        MULADD_AB2((m), (l), (h), (a)[0], (a)[2]); /* 0 + 2 = 2 */ \
        SQRADD_A((m), (l), (h), (a)[1]); /* 1 + 1 = 2 */ \
        (r)[2] = (h); /* 2 */ \
        (h) = 0; \
        MULADD_AB2((h), (m), (l), (a)[1], (a)[2]); /* 1 + 2 = 3 */ \
        MULADD_AB2((h), (m), (l), (a)[0], (a)[3]); /* 0 + 3 = 3 */ \
        (r)[3] = (l); /* 3 */ \
        (l) = 0; \
    } while (0)
/* Comba squaring, columns 4..5 (shared by the 6- and 8-word kernels). */
#define SQR_COMBA_BEGIN_2(r, a, h, m, l) do { \
        MULADD_AB2((l), (h), (m), (a)[0], (a)[4]); /* 0 + 4 = 4 */ \
        MULADD_AB2((l), (h), (m), (a)[1], (a)[3]); /* 1 + 3 = 4 */ \
        SQRADD_A((l), (h), (m), (a)[2]); /* 2 + 2 = 4 */ \
        (r)[4] = (m); /* 4 */ \
        (m) = 0; \
        MULADD_AB2((m), (l), (h), (a)[2], (a)[3]); /* 2 + 3 = 5 */ \
        MULADD_AB2((m), (l), (h), (a)[1], (a)[4]); /* 1 + 4 = 5 */ \
        MULADD_AB2((m), (l), (h), (a)[0], (a)[5]); /* 0 + 5 = 5 */ \
        (r)[5] = (h); /* 5 */ \
        (h) = 0; \
    } while (0)
/* Comba multiplication, columns 0..3. */
#define MUL_COMBA_BEGIN_1(r, a, b, h, m, l) do { \
        MULADD_AB((h), (m), (l), (a)[0], (b)[0]); \
        (r)[0] = (l); \
        (l) = 0; \
        MULADD_AB((l), (h), (m), (a)[0], (b)[1]); \
        MULADD_AB((l), (h), (m), (a)[1], (b)[0]); \
        (r)[1] = (m); \
        (m) = 0; \
        MULADD_AB((m), (l), (h), (a)[2], (b)[0]); /* 2 + 0 = 2 */ \
        MULADD_AB((m), (l), (h), (a)[1], (b)[1]); /* 1 + 1 = 2 */ \
        MULADD_AB((m), (l), (h), (a)[0], (b)[2]); /* 0 + 2 = 2 */ \
        (r)[2] = (h); \
        (h) = 0; \
        MULADD_AB((h), (m), (l), (a)[0], (b)[3]); /* 0 + 3 = 3 */ \
        MULADD_AB((h), (m), (l), (a)[1], (b)[2]); /* 1 + 2 = 3 */ \
        MULADD_AB((h), (m), (l), (a)[2], (b)[1]); /* 2 + 1 = 3 */ \
        MULADD_AB((h), (m), (l), (a)[3], (b)[0]); /* 3 + 0 = 3 */ \
        (r)[3] = (l); /* 3 */ \
        (l) = 0; \
    } while (0)
/* Comba multiplication, columns 4..5 (shared by the 6- and 8-word kernels). */
#define MUL_COMBA_BEGIN_2(r, a, b, h, m, l) do { \
        MULADD_AB((l), (h), (m), (a)[4], (b)[0]); /* 4 + 0 = 4 */ \
        MULADD_AB((l), (h), (m), (a)[3], (b)[1]); /* 3 + 1 = 4 */ \
        MULADD_AB((l), (h), (m), (a)[2], (b)[2]); /* 2 + 2 = 4 */ \
        MULADD_AB((l), (h), (m), (a)[1], (b)[3]); /* 1 + 3 = 4 */ \
        MULADD_AB((l), (h), (m), (a)[0], (b)[4]); /* 0 + 4 = 4 */ \
        (r)[4] = (m); /* 4 */ \
        (m) = 0; \
        MULADD_AB((m), (l), (h), (a)[0], (b)[5]); /* 0 + 5 = 5 */ \
        MULADD_AB((m), (l), (h), (a)[1], (b)[4]); /* 1 + 4 = 5 */ \
        MULADD_AB((m), (l), (h), (a)[2], (b)[3]); /* 2 + 3 = 5 */ \
        MULADD_AB((m), (l), (h), (a)[3], (b)[2]); /* 3 + 2 = 5 */ \
        MULADD_AB((m), (l), (h), (a)[4], (b)[1]); /* 4 + 1 = 5 */ \
        MULADD_AB((m), (l), (h), (a)[5], (b)[0]); /* 5 + 0 = 5 */ \
        (r)[5] = (h); /* 5 */ \
        (h) = 0; \
    } while (0)
/* Comba multiplication, columns 6..8 (8-word kernel). */
#define MUL_COMBA_BEGIN_3(r, a, b, h, m, l) do { \
        MULADD_AB((h), (m), (l), (a)[6], (b)[0]); /* 6 + 0 = 6 */ \
        MULADD_AB((h), (m), (l), (a)[5], (b)[1]); /* 5 + 1 = 6 */ \
        MULADD_AB((h), (m), (l), (a)[4], (b)[2]); /* 4 + 2 = 6 */ \
        MULADD_AB((h), (m), (l), (a)[3], (b)[3]); /* 3 + 3 = 6 */ \
        MULADD_AB((h), (m), (l), (a)[2], (b)[4]); /* 2 + 4 = 6 */ \
        MULADD_AB((h), (m), (l), (a)[1], (b)[5]); /* 1 + 5 = 6 */ \
        MULADD_AB((h), (m), (l), (a)[0], (b)[6]); /* 0 + 6 = 6 */ \
        (r)[6] = (l); /* 6 */ \
        (l) = 0; \
        MULADD_AB((l), (h), (m), (a)[0], (b)[7]); /* 0 + 7 = 7 */ \
        MULADD_AB((l), (h), (m), (a)[1], (b)[6]); /* 1 + 6 = 7 */ \
        MULADD_AB((l), (h), (m), (a)[2], (b)[5]); /* 2 + 5 = 7 */ \
        MULADD_AB((l), (h), (m), (a)[3], (b)[4]); /* 3 + 4 = 7 */ \
        MULADD_AB((l), (h), (m), (a)[4], (b)[3]); /* 4 + 3 = 7 */ \
        MULADD_AB((l), (h), (m), (a)[5], (b)[2]); /* 5 + 2 = 7 */ \
        MULADD_AB((l), (h), (m), (a)[6], (b)[1]); /* 6 + 1 = 7 */ \
        MULADD_AB((l), (h), (m), (a)[7], (b)[0]); /* 7 + 0 = 7 */ \
        (r)[7] = (m); /* 7 */ \
        (m) = 0; \
        MULADD_AB((m), (l), (h), (a)[7], (b)[1]); /* 7 + 1 = 8 */ \
        MULADD_AB((m), (l), (h), (a)[6], (b)[2]); /* 6 + 2 = 8 */ \
        MULADD_AB((m), (l), (h), (a)[5], (b)[3]); /* 5 + 3 = 8 */ \
        MULADD_AB((m), (l), (h), (a)[4], (b)[4]); /* 4 + 4 = 8 */ \
        MULADD_AB((m), (l), (h), (a)[3], (b)[5]); /* 3 + 5 = 8 */ \
        MULADD_AB((m), (l), (h), (a)[2], (b)[6]); /* 2 + 6 = 8 */ \
        MULADD_AB((m), (l), (h), (a)[1], (b)[7]); /* 1 + 7 = 8 */ \
        (r)[8] = (h); /* 8 */ \
        (h) = 0; \
    } while (0)
/* r[0..15] = a^2 for an 8-limb operand, using column-wise (Comba) squaring.
 * h/m/l form a rolling three-limb accumulator. SQRADD_A accumulates a
 * diagonal term a[i]^2; MULADD_AB2 accumulates a cross term a[i]*a[j]
 * (i != j) -- the "AB2" name presumably means the product is added twice,
 * since each cross term occurs in two symmetric positions of the square
 * (TODO confirm against the macro definition). Columns 0-5 are produced by
 * SQR_COMBA_BEGIN_1/2, defined elsewhere. r must hold 16 limbs and is
 * assumed not to alias a. */
static void SqrComba8(BN_UINT *r, const BN_UINT *a)
{
    BN_UINT h = 0;
    BN_UINT m = 0;
    BN_UINT l = 0;
    SQR_COMBA_BEGIN_1(r, a, h, m, l);
    SQR_COMBA_BEGIN_2(r, a, h, m, l);
    MULADD_AB2(h, m, l, a[0], a[6]); /* 0 + 6 = 6 */
    MULADD_AB2(h, m, l, a[1], a[5]); /* 1 + 5 = 6 */
    MULADD_AB2(h, m, l, a[2], a[4]); /* 2 + 4 = 6 */
    SQRADD_A(h, m, l, a[3]); /* 3 + 3 = 6 */
    r[6] = l; /* 6 */
    l = 0;
    MULADD_AB2(l, h, m, a[3], a[4]); /* 3 + 4 = 7 */
    MULADD_AB2(l, h, m, a[2], a[5]); /* 2 + 5 = 7 */
    MULADD_AB2(l, h, m, a[1], a[6]); /* 1 + 6 = 7 */
    MULADD_AB2(l, h, m, a[0], a[7]); /* 0 + 7 = 7 */
    r[7] = m; /* 7 */
    m = 0;
    MULADD_AB2(m, l, h, a[1], a[7]); /* 1 + 7 = 8 */
    MULADD_AB2(m, l, h, a[2], a[6]); /* 2 + 6 = 8 */
    MULADD_AB2(m, l, h, a[3], a[5]); /* 3 + 5 = 8 */
    SQRADD_A(m, l, h, a[4]); /* 4 + 4 = 8 */
    r[8] = h; /* 8 */
    h = 0;
    MULADD_AB2(h, m, l, a[4], a[5]); /* 4 + 5 = 9 */
    MULADD_AB2(h, m, l, a[3], a[6]); /* 3 + 6 = 9 */
    MULADD_AB2(h, m, l, a[2], a[7]); /* 2 + 7 = 9 */
    r[9] = l; /* 9 */
    l = 0;
    MULADD_AB2(l, h, m, a[3], a[7]); /* 3 + 7 = 10 */
    MULADD_AB2(l, h, m, a[4], a[6]); /* 4 + 6 = 10 */
    SQRADD_A(l, h, m, a[5]); /* 5 + 5 = 10 */
    r[10] = m; /* 10 */
    m = 0;
    MULADD_AB2(m, l, h, a[5], a[6]); /* 5 + 6 = 11 */
    MULADD_AB2(m, l, h, a[4], a[7]); /* 4 + 7 = 11 */
    r[11] = h; /* 11 */
    h = 0;
    MULADD_AB2(h, m, l, a[5], a[7]); /* 5 + 7 = 12 */
    SQRADD_A(h, m, l, a[6]); /* 6 + 6 = 12 */
    r[12] = l; /* 12 */
    l = 0;
    MULADD_AB2(l, h, m, a[6], a[7]); /* 6 + 7 = 13 */
    r[13] = m; /* 13 */
    m = 0;
    SQRADD_A(m, l, h, a[7]); /* 7 + 7 = 14 */
    r[14] = h; /* 14 */
    r[15] = l; /* 15 */
}
/* r[0..11] = a^2 for a 6-limb operand, column-wise (Comba) squaring.
 * Same accumulator scheme as SqrComba8: SQRADD_A adds a diagonal term,
 * MULADD_AB2 a (doubled) cross term. Columns 0-5 come from
 * SQR_COMBA_BEGIN_1/2; columns 6-11 are completed here. r must hold
 * 12 limbs and is assumed not to alias a. */
void SqrComba6(BN_UINT *r, const BN_UINT *a)
{
    BN_UINT h = 0;
    BN_UINT m = 0;
    BN_UINT l = 0;
    SQR_COMBA_BEGIN_1(r, a, h, m, l);
    SQR_COMBA_BEGIN_2(r, a, h, m, l);
    MULADD_AB2(h, m, l, a[1], a[5]); /* 1 + 5 = 6 */
    MULADD_AB2(h, m, l, a[2], a[4]); /* 2 + 4 = 6 */
    SQRADD_A(h, m, l, a[3]); /* 3 + 3 = 6 */
    r[6] = l; /* 6 */
    l = 0;
    MULADD_AB2(l, h, m, a[3], a[4]); /* 3 + 4 = 7 */
    MULADD_AB2(l, h, m, a[2], a[5]); /* 2 + 5 = 7 */
    r[7] = m; /* 7 */
    m = 0;
    MULADD_AB2(m, l, h, a[3], a[5]); /* 3 + 5 = 8 */
    SQRADD_A(m, l, h, a[4]); /* 4 + 4 = 8 */
    r[8] = h; /* 8 */
    h = 0;
    MULADD_AB2(h, m, l, a[4], a[5]); /* 4 + 5 = 9 */
    r[9] = l; /* 9 */
    l = 0;
    SQRADD_A(l, h, m, a[5]); /* 5 + 5 = 10 */
    r[10] = m; /* 10 */
    r[11] = h; /* 11 */
}
/* r[0..7] = a^2 for a 4-limb operand, column-wise (Comba) squaring.
 * Columns 0-3 come from SQR_COMBA_BEGIN_1; columns 4-7 are completed here.
 * r must hold 8 limbs and is assumed not to alias a. */
void SqrComba4(BN_UINT *r, const BN_UINT *a)
{
    BN_UINT h = 0;
    BN_UINT m = 0;
    BN_UINT l = 0;
    SQR_COMBA_BEGIN_1(r, a, h, m, l);
    MULADD_AB2(l, h, m, a[1], a[3]); /* 1 + 3 = 4 */
    SQRADD_A(l, h, m, a[2]); /* 2 + 2 = 4 */
    r[4] = m; /* 4 */
    m = 0;
    MULADD_AB2(m, l, h, a[2], a[3]); /* 2 + 3 = 5 */
    r[5] = h; /* 5 */
    h = 0;
    SQRADD_A(h, m, l, a[3]); /* 3 + 3 = 6 */
    r[6] = l; /* 6 */
    r[7] = m; /* 7 */
}
/* r = a^2 for the remaining small sizes (1, 2 or 3 limbs); r receives
 * 2 * size limbs. Fully unrolled Comba squaring per size; size == 1
 * degenerates to a single widening square via SQR_A. */
static void SqrComba(BN_UINT *r, const BN_UINT *a, uint32_t size)
{
    BN_UINT h = 0;
    BN_UINT m = 0;
    BN_UINT l = 0;
    if (size == 3) { /* 3 */
        SQRADD_A(h, m, l, a[0]);
        r[0] = l;
        l = 0;
        MULADD_AB2(l, h, m, a[0], a[1]);
        r[1] = m;
        m = 0;
        MULADD_AB2(m, l, h, a[0], a[2]); /* 0 + 2 = 2 */
        SQRADD_A(m, l, h, a[1]); /* 1 + 1 = 2 */
        r[2] = h; /* 2 */
        h = 0;
        MULADD_AB2(h, m, l, a[1], a[2]); /* 1 + 2 = 3 */
        r[3] = l; /* 3 */
        l = 0;
        SQRADD_A(l, h, m, a[2]); /* 2 + 2 = 4 */
        r[4] = m; /* 4 */
        r[5] = h; /* 5 */
        return;
    }
    if (size == 2) { /* 2 */
        SQRADD_A(h, m, l, a[0]);
        r[0] = l;
        l = 0;
        MULADD_AB2(l, h, m, a[0], a[1]);
        r[1] = m;
        m = 0;
        SQRADD_A(m, l, h, a[1]); /* 1 + 1 = 2 */
        r[2] = h; /* 2 */
        r[3] = l; /* 3 */
        return;
    }
    SQR_A(r[1], r[0], a[0]); /* size == 1 */
}
/* r[0..15] = a * b for 8-limb operands, column-wise (Comba) multiplication.
 * Columns 0-8 are produced by the MUL_COMBA_BEGIN_1/2/3 macros; columns
 * 9-15 are completed here. r must hold 16 limbs and is assumed not to
 * alias a or b. */
void MulComba8(BN_UINT *r, const BN_UINT *a, const BN_UINT *b)
{
    BN_UINT h = 0;
    BN_UINT m = 0;
    BN_UINT l = 0;
    MUL_COMBA_BEGIN_1(r, a, b, h, m, l);
    MUL_COMBA_BEGIN_2(r, a, b, h, m, l);
    MUL_COMBA_BEGIN_3(r, a, b, h, m, l);
    MULADD_AB(h, m, l, a[2], b[7]); /* 2 + 7 = 9 */
    MULADD_AB(h, m, l, a[3], b[6]); /* 3 + 6 = 9 */
    MULADD_AB(h, m, l, a[4], b[5]); /* 4 + 5 = 9 */
    MULADD_AB(h, m, l, a[5], b[4]); /* 5 + 4 = 9 */
    MULADD_AB(h, m, l, a[6], b[3]); /* 6 + 3 = 9 */
    MULADD_AB(h, m, l, a[7], b[2]); /* 7 + 2 = 9 */
    r[9] = l; /* 9 */
    l = 0;
    MULADD_AB(l, h, m, a[7], b[3]); /* 7 + 3 = 10 */
    MULADD_AB(l, h, m, a[6], b[4]); /* 6 + 4 = 10 */
    MULADD_AB(l, h, m, a[5], b[5]); /* 5 + 5 = 10 */
    MULADD_AB(l, h, m, a[4], b[6]); /* 4 + 6 = 10 */
    MULADD_AB(l, h, m, a[3], b[7]); /* 3 + 7 = 10 */
    r[10] = m; /* 10 */
    m = 0;
    MULADD_AB(m, l, h, a[4], b[7]); /* 4 + 7 = 11 */
    MULADD_AB(m, l, h, a[5], b[6]); /* 5 + 6 = 11 */
    MULADD_AB(m, l, h, a[6], b[5]); /* 6 + 5 = 11 */
    MULADD_AB(m, l, h, a[7], b[4]); /* 7 + 4 = 11 */
    r[11] = h; /* 11 */
    h = 0;
    MULADD_AB(h, m, l, a[7], b[5]); /* 7 + 5 = 12 */
    MULADD_AB(h, m, l, a[6], b[6]); /* 6 + 6 = 12 */
    MULADD_AB(h, m, l, a[5], b[7]); /* 5 + 7 = 12 */
    r[12] = l; /* 12 */
    l = 0;
    MULADD_AB(l, h, m, a[6], b[7]); /* 6 + 7 = 13 */
    MULADD_AB(l, h, m, a[7], b[6]); /* 7 + 6 = 13 */
    r[13] = m; /* 13 */
    m = 0;
    MULADD_AB(m, l, h, a[7], b[7]); /* 7 + 7 = 14 */
    r[14] = h; /* 14 */
    r[15] = l; /* 15 */
}
/* r[0..11] = a * b for 6-limb operands, column-wise (Comba) multiplication.
 * Columns 0-5 are produced by MUL_COMBA_BEGIN_1/2; columns 6-11 are
 * completed here. r must hold 12 limbs and is assumed not to alias a or b. */
void MulComba6(BN_UINT *r, const BN_UINT *a, const BN_UINT *b)
{
    BN_UINT h = 0;
    BN_UINT m = 0;
    BN_UINT l = 0;
    MUL_COMBA_BEGIN_1(r, a, b, h, m, l);
    MUL_COMBA_BEGIN_2(r, a, b, h, m, l);
    MULADD_AB(h, m, l, a[5], b[1]); /* 5 + 1 = 6 */
    MULADD_AB(h, m, l, a[4], b[2]); /* 4 + 2 = 6 */
    MULADD_AB(h, m, l, a[3], b[3]); /* 3 + 3 = 6 */
    MULADD_AB(h, m, l, a[2], b[4]); /* 2 + 4 = 6 */
    MULADD_AB(h, m, l, a[1], b[5]); /* 1 + 5 = 6 */
    r[6] = l; /* 6 */
    l = 0;
    MULADD_AB(l, h, m, a[2], b[5]); /* 2 + 5 = 7 */
    MULADD_AB(l, h, m, a[3], b[4]); /* 3 + 4 = 7 */
    MULADD_AB(l, h, m, a[4], b[3]); /* 4 + 3 = 7 */
    MULADD_AB(l, h, m, a[5], b[2]); /* 5 + 2 = 7 */
    r[7] = m; /* 7 */
    m = 0;
    MULADD_AB(m, l, h, a[5], b[3]); /* 5 + 3 = 8 */
    MULADD_AB(m, l, h, a[4], b[4]); /* 4 + 4 = 8 */
    MULADD_AB(m, l, h, a[3], b[5]); /* 3 + 5 = 8 */
    r[8] = h; /* 8 */
    h = 0;
    MULADD_AB(h, m, l, a[4], b[5]); /* 4 + 5 = 9 */
    MULADD_AB(h, m, l, a[5], b[4]); /* 5 + 4 = 9 */
    r[9] = l; /* 9 */
    l = 0;
    MULADD_AB(l, h, m, a[5], b[5]); /* 5 + 5 = 10 */
    r[10] = m; /* 10 */
    r[11] = h; /* 11 */
}
/* r[0..7] = a * b for 4-limb operands, column-wise (Comba) multiplication.
 * Columns 0-3 are produced by MUL_COMBA_BEGIN_1; columns 4-7 are completed
 * here. r must hold 8 limbs and is assumed not to alias a or b. */
void MulComba4(BN_UINT *r, const BN_UINT *a, const BN_UINT *b)
{
    BN_UINT h = 0;
    BN_UINT m = 0;
    BN_UINT l = 0;
    MUL_COMBA_BEGIN_1(r, a, b, h, m, l);
    MULADD_AB(l, h, m, a[3], b[1]); /* 3 + 1 = 4 */
    MULADD_AB(l, h, m, a[2], b[2]); /* 2 + 2 = 4 */
    MULADD_AB(l, h, m, a[1], b[3]); /* 1 + 3 = 4 */
    r[4] = m; /* 4 */
    m = 0;
    MULADD_AB(m, l, h, a[2], b[3]); /* 2 + 3 = 5 */
    MULADD_AB(m, l, h, a[3], b[2]); /* 3 + 2 = 5 */
    r[5] = h; /* 5 */
    h = 0;
    MULADD_AB(h, m, l, a[3], b[3]); /* 3 + 3 = 6 */
    r[6] = l; /* 6 */
    r[7] = m; /* 7 */
}
/* r = a * b for the remaining small sizes (1, 2 or 3 limbs); r receives
 * 2 * size limbs. Fully unrolled Comba multiplication per size; size == 1
 * degenerates to a single widening multiply via MUL_AB. */
static void MulComba(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t size)
{
    BN_UINT h = 0;
    BN_UINT m = 0;
    BN_UINT l = 0;
    if (size == 3) { /* 3 */
        MULADD_AB(h, m, l, a[0], b[0]);
        r[0] = l;
        l = 0;
        MULADD_AB(l, h, m, a[0], b[1]);
        MULADD_AB(l, h, m, a[1], b[0]);
        r[1] = m;
        m = 0;
        MULADD_AB(m, l, h, a[2], b[0]); /* 2 + 0 = 2 */
        MULADD_AB(m, l, h, a[1], b[1]); /* 1 + 1 = 2 */
        MULADD_AB(m, l, h, a[0], b[2]); /* 0 + 2 = 2 */
        r[2] = h;
        h = 0;
        MULADD_AB(h, m, l, a[1], b[2]); /* 1 + 2 = 3 */
        MULADD_AB(h, m, l, a[2], b[1]); /* 2 + 1 = 3 */
        r[3] = l; /* 3 */
        l = 0;
        MULADD_AB(l, h, m, a[2], b[2]); /* 2 + 2 = 4 */
        r[4] = m; /* 4 */
        r[5] = h; /* 5 */
        return;
    }
    if (size == 2) { /* 2 */
        MULADD_AB(h, m, l, a[0], b[0]);
        r[0] = l;
        l = 0;
        MULADD_AB(l, h, m, a[0], b[1]);
        MULADD_AB(l, h, m, a[1], b[0]);
        r[1] = m;
        m = 0;
        MULADD_AB(m, l, h, a[1], b[1]);
        r[2] = h; /* 2 */
        r[3] = l; /* 3 */
        return;
    }
    MUL_AB(r[1], r[0], a[0], b[0]); /* size == 1 */
}
/* Return the scratch size (in limbs) needed by MulConquer/SqrConquer for
 * an operand of 'size' limbs: the smallest power of two that is >= size
 * (and >= 8, the Comba batch base), multiplied by 4 -- each recursion
 * layer consumes 2 * size limbs and halves the problem, so the total is
 * bounded by 2 * 2 = 4 times the base. */
uint32_t SpaceSize(uint32_t size)
{
    uint32_t cap = 8; /* matches the 8-limb batch-processing base */
    while (cap < size) {
        cap <<= 1;
    }
    return cap << 2; /* 4x expansion */
}
/* Compare two equal-length big-number limb arrays (little-endian limb
 * order, most significant limb last).
 * return 0, if a == b
 * return 1, if a > b
 * return -1, if a < b
 */
static int32_t BinCmpSame(const BN_UINT *a, const BN_UINT *b, uint32_t size)
{
    for (int64_t i = (int64_t)size - 1; i >= 0; i--) {
        if (a[i] > b[i]) {
            return 1;
        }
        if (a[i] < b[i]) {
            return -1;
        }
    }
    return 0;
}
/* The caller promised that aSize >= bSize.
 * t = ABS(a - b). Need to ensure that aSize == bSize + (0 || 1).
 * return 0 if a > b
 * return 1 if a <= b
 * Fix: removed the unreachable trailing 'return 0;' (both branches of the
 * final if/else already return) and replaced the do{...}while(0)/break
 * construct with straight-line comparison logic.
 */
static uint32_t ABS_Sub(BN_UINT *t, const BN_UINT *a, uint32_t aSize, const BN_UINT *b, uint32_t bSize)
{
    int32_t cmp = 0;
    if (aSize > bSize) {
        /* a has one extra top limb (bSize == aSize - 1); copy it through.
         * A non-zero top limb already decides a > b. */
        t[bSize] = a[bSize];
        if (a[bSize] > 0) {
            cmp = 1;
        }
    }
    if (cmp == 0) {
        cmp = BinCmpSame(a, b, bSize);
    }
    if (cmp > 0) {
        BN_UINT borrow = BinSub(t, a, b, bSize);
        if (aSize > bSize) { /* When the length difference exists and a > b exists, the borrowing is processed. */
            t[bSize] -= borrow;
        }
        return 0;
    }
    BinSub(t, b, a, bSize);
    return 1;
}
/** Karatsuba multiplication. Only aSize == bSize is supported. This interface
 * will recurse; the recursion depth is O(deep) = log2(size).
 * Ensure that space >= SpaceSize(size).
 * Writing a = ah|al and b = bh|bl (split at sizeLo limbs):
 * (ah|al * bh|bl) = (((ah*bh) << 2) + (((ah*bh) + (al*bl) - (ah - al)(bh - bl)) << 1) + (al*bl))
 * where "<< k" denotes a shift by k * sizeLo limbs.
 * When consttime is set, the unrolled Comba shortcuts are skipped and small
 * blocks go through BinMul instead.
 */
void MulConquer(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t size, BN_UINT *space, bool consttime)
{
    /* Base cases: dispatch small sizes to the unrolled Comba kernels. */
    if (!consttime) {
        if (size == 8) { /* Perform 8x batch processing */
            MulComba8(r, a, b);
            return;
        }
        if (size == 6) { /* Perform 6x batch processing */
            MulComba6(r, a, b);
            return;
        }
        if (size == 4) { /* Perform 4x batch processing */
            MulComba4(r, a, b);
            return;
        }
        if (size < 4) { /* Less than 4, simple processing */
            MulComba(r, a, b, size);
            return;
        }
    } else if (size <= 8) { /* Calculate if the block size is smaller than 8. */
        BinMul(r, size << 1, a, size, b, size);
        return;
    }
    /* truncates the length of the low bits of the BigNum, that is the length of al bl. */
    const uint32_t sizeLo = size >> 1;
    const uint32_t sizeLo2 = sizeLo << 1;
    const uint32_t shift1 = sizeLo; /* (((ah*bh) + (al*bl) - (ah - al)(bh - bl)) << 1) location */
    const uint32_t shift2 = shift1 << 1; /* ((ah*bh) << 2) location */
    /* truncates the length of the high bits of the BigNum, that is the length of ah bh. */
    const uint32_t sizeHi = size - sizeLo;
    const uint32_t sizeHi2 = sizeHi << 1;
    /* Split the input 'space'. The current function uses tmp1 and tmp2,
     * and the remaining newspace is used by the lower layer.
     * space = tmp1_lo..tmp1_hi | tmp2_lo..tmp2_hi | newSpace, sizeof(tmp1_lo) == sizeHi.
     */
    BN_UINT *tmp1 = space;
    BN_UINT *tmp2 = tmp1 + sizeHi2;
    BN_UINT *newSpace = tmp2 + sizeHi2;
    /* tmp2_lo = (ah-al); sign records whether (ah-al)(bh-bl) is negative overall */
    uint32_t sign = ABS_Sub(tmp2, a + shift1, sizeHi, a, sizeLo);
    /* tmp2_hi = (bh-bl) */
    sign ^= ABS_Sub(tmp2 + sizeHi, b + shift1, sizeHi, b, sizeLo);
    MulConquer(r, a, b, sizeLo, newSpace, consttime); /* calculate (al*bl) */
    MulConquer(r + shift2, a + shift1, b + shift1, sizeHi, newSpace, consttime); /* calculate (ah*bh) */
    MulConquer(tmp1, tmp2, tmp2 + sizeHi, sizeHi, newSpace, consttime); /* calculate (ah-al)(bh-bl) */
    /* At this time r has stored ((ah*bh) << 2) and (al*bl) */
    /* carry should be added in (r + shift1)[sizeHi * 2] */
    /* tmp2 is (ah*bh) + (al*bl), but the processing length here is sizeLo * 2 */
    BN_UINT carry = BinAdd(tmp2, r, r + shift2, sizeLo2);
    if (sizeHi > sizeLo) {
        /* If there is a length difference, the length of (ah*bh) is sizeLo * 2 + 2,
        and the tail of (ah*bh) needs to be processed. */
        /* point to (r + shift2)[sizeLo * 2], the unprocessed tail of (ah*bh) */
        const uint32_t position = shift2 + (sizeLo2);
        tmp2[sizeLo2] = r[position] + carry;
        carry = (tmp2[sizeLo2] < carry) ? 1 : 0;
        /* continue the processing */
        tmp2[(sizeLo2) + 1] = r[position + 1] + carry;
        carry = (tmp2[sizeLo2 + 1] < carry) ? 1 : 0;
    }
    /* tmp1 = (ah*bh) + (al*bl) - (ah-al)(bh-bl), tmp2 is (ah*bh) + (al*bl) */
    if (sign == 1) {
        carry += BinAdd(tmp1, tmp2, tmp1, sizeHi2);
    } else {
        carry -= BinSub(tmp1, tmp2, tmp1, sizeHi2);
    }
    /* finally r adds tmp1, that is (ah*bh) + (al*bl) - (ah - al)(bh - bl) */
    carry += BinAdd(r + shift1, r + shift1, tmp1, sizeHi2);
    /* propagate the remaining carry through the top limbs of r */
    for (uint32_t i = shift1 + sizeHi2; carry > 0 && i < (size << 1); i++) {
        ADD_AB(carry, r[i], r[i], carry);
    }
}
/** Karatsuba squaring. This interface will recurse; the recursion depth is
 * O(deep) = log2(size). Ensure that space >= SpaceSize(size).
 * Writing a = x|y (split at sizeLo limbs):
 * (x|y)^2 = ((x^2 << 2) + ((x^2 + y^2 - (x - y)^2) << 1) + y^2)
 * where "<< k" denotes a shift by k * sizeLo limbs. Note (x-y)^2 is always
 * non-negative, so unlike MulConquer no sign tracking is needed.
 */
void SqrConquer(BN_UINT *r, const BN_UINT *a, uint32_t size, BN_UINT *space, bool consttime)
{
    /* Base cases: dispatch small sizes to the unrolled Comba kernels. */
    if (!consttime) {
        if (size == 8) { /* Perform 8x batch processing */
            SqrComba8(r, a);
            return;
        }
        if (size == 6) { /* Perform 6x batch processing */
            SqrComba6(r, a);
            return;
        }
        if (size == 4) { /* Perform 4x batch processing */
            SqrComba4(r, a);
            return;
        }
        if (size < 4) { /* Less than 4, simple processing */
            SqrComba(r, a, size);
            return;
        }
    } else if (size <= 8) { /* Calculate if the block size is smaller than 8. */
        BinSqr(r, size << 1, a, size);
        return;
    }
    /* truncates the length of the high bits of the BigNum, that is the length of x. */
    const uint32_t sizeHi = (size + 1) >> 1;
    /* truncates the length of the low bits of the BigNum, that is the length of y. */
    const uint32_t sizeLo = size >> 1;
    const uint32_t shift1 = sizeLo; /* ((x^2 + y^2 - (x - y)^2)) << 1) location */
    const uint32_t shift2 = shift1 << 1; /* ((x^2 << 2) location */
    /* Split the input 'space'. The current function uses tmp1 and tmp2,
    and the remaining newspace is used by the lower layer. */
    BN_UINT *tmp1 = space;
    BN_UINT *tmp2 = tmp1 + (sizeHi << 1);
    BN_UINT *newSpace = tmp2 + (sizeHi << 1);
    /* tmp2 is the upper bits of num minus the lower bits of num, (x-y);
     * the sign is irrelevant because tmp2 is only squared below. */
    (void)ABS_Sub(tmp2, a + shift1, sizeHi, a, sizeLo);
    SqrConquer(r, a, sizeLo, newSpace, consttime); /* calculate y^2 */
    SqrConquer(r + shift2, a + shift1, sizeHi, newSpace, consttime); /* calculate x^2 */
    SqrConquer(tmp1, tmp2, sizeHi, newSpace, consttime); /* calculate (x-y)^2 */
    /* At this time r has stored (x^2 << 2) and y^2 */
    /* carry should be added in (r + shift1)[sizeHi * 2] */
    /* tmp2 = x^2 + y^2, but the processing length here is sizeLo * 2 */
    BN_UINT carry = BinAdd(tmp2, r, r + shift2, sizeLo << 1);
    if (sizeHi > sizeLo) {
        /* If there is a length difference, the length of x^2 is sizeLo * 2 + 2,
        and the tail of x^2 needs to be processed. */
        /* point to (r + shift2)[sizeLo * 2], the unprocessed tail of x^2 */
        const uint32_t position = shift2 + (sizeLo << 1);
        tmp2[sizeLo << 1] = r[position] + carry;
        carry = (tmp2[sizeLo << 1] < carry) ? 1 : 0;
        /* continue the processing */
        tmp2[(sizeLo << 1) + 1] = r[position + 1] + carry;
        carry = (tmp2[(sizeLo << 1) + 1] < carry) ? 1 : 0;
    }
    /* tmp1 = x^2 + y^2 - (x-y)^2, tmp2 is x^2 + y^2 */
    carry -= BinSub(tmp1, tmp2, tmp1, sizeHi << 1);
    /* finally r adds x^2 + y^2 - (x-y)^2 */
    carry += BinAdd(r + shift1, r + shift1, tmp1, sizeHi << 1);
    /* propagate the remaining carry through the top limbs of r */
    uint32_t i;
    for (i = shift1 + (sizeHi << 1); carry > 0 && i < (size << 1); i++) {
        ADD_AB(carry, r[i], r[i], carry);
    }
}
#endif /* HITLS_CRYPTO_BN_COMBA */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_comba.c | C | unknown | 25,469 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN_PRIME_RFC3526
#include "crypt_errno.h"
#include "bn_basic.h"
#if defined(HITLS_SIXTY_FOUR_BITS)
// RFC 3526: 2048-bit MODP Group, this prime is: 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 }
static const BN_UINT RFC3526_PRIME_2048[] = {
0xFFFFFFFFFFFFFFFFUL, 0x15728E5A8AACAA68UL, 0x15D2261898FA0510UL, 0x3995497CEA956AE5UL,
0xDE2BCBF695581718UL, 0xB5C55DF06F4C52C9UL, 0x9B2783A2EC07A28FUL, 0xE39E772C180E8603UL,
0x32905E462E36CE3BUL, 0xF1746C08CA18217CUL, 0x1C62F356208552BBUL, 0x83655D23DCA3AD96UL,
0x69163FA8FD24CF5FUL, 0x98DA48361C55D39AUL, 0xC2007CB8A163BF05UL, 0x49286651ECE45B3DUL,
0xAE9F24117C4B1FE6UL, 0xEE386BFB5A899FA5UL, 0x0BFF5CB6F406B7EDUL, 0xF44C42E9A637ED6BUL,
0xE485B576625E7EC6UL, 0x4FE1356D6D51C245UL, 0x302B0A6DF25F1437UL, 0xEF9519B3CD3A431BUL,
0x514A08798E3404DDUL, 0x020BBEA63B139B22UL, 0x29024E088A67CC74UL, 0xC4C6628B80DC1CD1UL,
0xC90FDAA22168C234UL, 0xFFFFFFFFFFFFFFFFUL
};
// RFC 3526: 3072-bit MODP Group, this prime is: 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 }
static const BN_UINT RFC3526_PRIME_3072[] = {
0xFFFFFFFFFFFFFFFFUL, 0x4B82D120A93AD2CAUL, 0x43DB5BFCE0FD108EUL, 0x08E24FA074E5AB31UL,
0x770988C0BAD946E2UL, 0xBBE117577A615D6CUL, 0x521F2B18177B200CUL, 0xD87602733EC86A64UL,
0xF12FFA06D98A0864UL, 0xCEE3D2261AD2EE6BUL, 0x1E8C94E04A25619DUL, 0xABF5AE8CDB0933D7UL,
0xB3970F85A6E1E4C7UL, 0x8AEA71575D060C7DUL, 0xECFB850458DBEF0AUL, 0xA85521ABDF1CBA64UL,
0xAD33170D04507A33UL, 0x15728E5A8AAAC42DUL, 0x15D2261898FA0510UL, 0x3995497CEA956AE5UL,
0xDE2BCBF695581718UL, 0xB5C55DF06F4C52C9UL, 0x9B2783A2EC07A28FUL, 0xE39E772C180E8603UL,
0x32905E462E36CE3BUL, 0xF1746C08CA18217CUL, 0x670C354E4ABC9804UL, 0x9ED529077096966DUL,
0x1C62F356208552BBUL, 0x83655D23DCA3AD96UL, 0x69163FA8FD24CF5FUL, 0x98DA48361C55D39AUL,
0xC2007CB8A163BF05UL, 0x49286651ECE45B3DUL, 0xAE9F24117C4B1FE6UL, 0xEE386BFB5A899FA5UL,
0x0BFF5CB6F406B7EDUL, 0xF44C42E9A637ED6BUL, 0xE485B576625E7EC6UL, 0x4FE1356D6D51C245UL,
0x302B0A6DF25F1437UL, 0xEF9519B3CD3A431BUL, 0x514A08798E3404DDUL, 0x020BBEA63B139B22UL,
0x29024E088A67CC74UL, 0xC4C6628B80DC1CD1UL, 0xC90FDAA22168C234UL, 0xFFFFFFFFFFFFFFFFUL
};
// RFC 3526: 4096-bit MODP Group, this prime is: 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 }
static const BN_UINT RFC3526_PRIME_4096[] = {
0xFFFFFFFFFFFFFFFFUL, 0x4DF435C934063199UL, 0x86FFB7DC90A6C08FUL, 0x93B4EA988D8FDDC1UL,
0xD0069127D5B05AA9UL, 0xB81BDD762170481CUL, 0x1F612970CEE2D7AFUL, 0x233BA186515BE7EDUL,
0x99B2964FA090C3A2UL, 0x287C59474E6BC05DUL, 0x2E8EFC141FBECAA6UL, 0xDBBBC2DB04DE8EF9UL,
0x2583E9CA2AD44CE8UL, 0x1A946834B6150BDAUL, 0x99C327186AF4E23CUL, 0x88719A10BDBA5B26UL,
0x1A723C12A787E6D7UL, 0x4B82D120A9210801UL, 0x43DB5BFCE0FD108EUL, 0x08E24FA074E5AB31UL,
0x770988C0BAD946E2UL, 0xBBE117577A615D6CUL, 0x521F2B18177B200CUL, 0xD87602733EC86A64UL,
0xF12FFA06D98A0864UL, 0xCEE3D2261AD2EE6BUL, 0x1E8C94E04A25619DUL, 0xABF5AE8CDB0933D7UL,
0xB3970F85A6E1E4C7UL, 0x8AEA71575D060C7DUL, 0xECFB850458DBEF0AUL, 0xA85521ABDF1CBA64UL,
0xAD33170D04507A33UL, 0x15728E5A8AAAC42DUL, 0x15D2261898FA0510UL, 0x3995497CEA956AE5UL,
0xDE2BCBF695581718UL, 0xB5C55DF06F4C52C9UL, 0x9B2783A2EC07A28FUL, 0xE39E772C180E8603UL,
0x32905E462E36CE3BUL, 0xF1746C08CA18217CUL, 0x670C354E4ABC9804UL, 0x9ED529077096966DUL,
0x1C62F356208552BBUL, 0x83655D23DCA3AD96UL, 0x69163FA8FD24CF5FUL, 0x98DA48361C55D39AUL,
0xC2007CB8A163BF05UL, 0x49286651ECE45B3DUL, 0xAE9F24117C4B1FE6UL, 0xEE386BFB5A899FA5UL,
0x0BFF5CB6F406B7EDUL, 0xF44C42E9A637ED6BUL, 0xE485B576625E7EC6UL, 0x4FE1356D6D51C245UL,
0x302B0A6DF25F1437UL, 0xEF9519B3CD3A431BUL, 0x514A08798E3404DDUL, 0x020BBEA63B139B22UL,
0x29024E088A67CC74UL, 0xC4C6628B80DC1CD1UL, 0xC90FDAA22168C234UL, 0xFFFFFFFFFFFFFFFFUL
};
#elif defined(HITLS_THIRTY_TWO_BITS)
// RFC 3526: 2048-bit MODP Group, this prime is: 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 }
static const BN_UINT RFC3526_PRIME_2048[] = {
0xFFFFFFFF, 0xFFFFFFFF, 0x8AACAA68, 0x15728E5A, 0x98FA0510, 0x15D22618, 0xEA956AE5, 0x3995497C,
0x95581718, 0xDE2BCBF6, 0x6F4C52C9, 0xB5C55DF0, 0xEC07A28F, 0x9B2783A2, 0x180E8603, 0xE39E772C,
0x2E36CE3B, 0x32905E46, 0xCA18217C, 0xF1746C08, 0x4ABC9804, 0x670C354E, 0x7096966D, 0x9ED52907,
0x208552BB, 0x1C62F356, 0xDCA3AD96, 0x83655D23, 0xFD24CF5F, 0x69163FA8, 0x1C55D39A, 0x98DA4836,
0xA163BF05, 0xC2007CB8, 0xECE45B3D, 0x49286651, 0x7C4B1FE6, 0xAE9F2411, 0x5A899FA5, 0xEE386BFB,
0xF406B7ED, 0x0BFF5CB6, 0xA637ED6B, 0xF44C42E9, 0x625E7EC6, 0xE485B576, 0x6D51C245, 0x4FE1356D,
0xF25F1437, 0x302B0A6D, 0xCD3A431B, 0xEF9519B3, 0x8E3404DD, 0x514A0879, 0x3B139B22, 0x020BBEA6,
0x8A67CC74, 0x29024E08, 0x80DC1CD1, 0xC4C6628B, 0x2168C234, 0xC90FDAA2, 0xFFFFFFFF, 0xFFFFFFFF
};
// RFC 3526: 3072-bit MODP Group, this prime is: 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 }
static const BN_UINT RFC3526_PRIME_3072[] = {
0xFFFFFFFF, 0xFFFFFFFF, 0xA93AD2CA, 0x4B82D120, 0xE0FD108E, 0x43DB5BFC, 0x74E5AB31, 0x08E24FA0,
0xBAD946E2, 0x770988C0, 0x7A615D6C, 0xBBE11757, 0x177B200C, 0x521F2B18, 0x3EC86A64, 0xD8760273,
0xD98A0864, 0xF12FFA06, 0x1AD2EE6B, 0xCEE3D226, 0x4A25619D, 0x1E8C94E0, 0xDB0933D7, 0xABF5AE8C,
0xA6E1E4C7, 0xB3970F85, 0x5D060C7D, 0x8AEA7157, 0x58DBEF0A, 0xECFB8504, 0xDF1CBA64, 0xA85521AB,
0x04507A33, 0xAD33170D, 0x8AAAC42D, 0x15728E5A, 0x98FA0510, 0x15D22618, 0xEA956AE5, 0x3995497C,
0x95581718, 0xDE2BCBF6, 0x6F4C52C9, 0xB5C55DF0, 0xEC07A28F, 0x9B2783A2, 0x180E8603, 0xE39E772C,
0x2E36CE3B, 0x32905E46, 0xCA18217C, 0xF1746C08, 0x4ABC9804, 0x670C354E, 0x7096966D, 0x9ED52907,
0x208552BB, 0x1C62F356, 0xDCA3AD96, 0x83655D23, 0xFD24CF5F, 0x69163FA8, 0x1C55D39A, 0x98DA4836,
0xA163BF05, 0xC2007CB8, 0xECE45B3D, 0x49286651, 0x7C4B1FE6, 0xAE9F2411, 0x5A899FA5, 0xEE386BFB,
0xF406B7ED, 0x0BFF5CB6, 0xA637ED6B, 0xF44C42E9, 0x625E7EC6, 0xE485B576, 0x6D51C245, 0x4FE1356D,
0xF25F1437, 0x302B0A6D, 0xCD3A431B, 0xEF9519B3, 0x8E3404DD, 0x514A0879, 0x3B139B22, 0x020BBEA6,
0x8A67CC74, 0x29024E08, 0x80DC1CD1, 0xC4C6628B, 0x2168C234, 0xC90FDAA2, 0xFFFFFFFF, 0xFFFFFFFF
};
// RFC 3526: 4096-bit MODP Group, this prime is: 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 }
static const BN_UINT RFC3526_PRIME_4096[] = {
0xFFFFFFFF, 0xFFFFFFFF, 0x34063199, 0x4DF435C9, 0x90A6C08F, 0x86FFB7DC, 0x8D8FDDC1, 0x93B4EA98,
0xD5B05AA9, 0xD0069127, 0x2170481C, 0xB81BDD76, 0xCEE2D7AF, 0x1F612970, 0x515BE7ED, 0x233BA186,
0xA090C3A2, 0x99B2964F, 0x4E6BC05D, 0x287C5947, 0x1FBECAA6, 0x2E8EFC14, 0x04DE8EF9, 0xDBBBC2DB,
0x2AD44CE8, 0x2583E9CA, 0xB6150BDA, 0x1A946834, 0x6AF4E23C, 0x99C32718, 0xBDBA5B26, 0x88719A10,
0xA787E6D7, 0x1A723C12, 0xA9210801, 0x4B82D120, 0xE0FD108E, 0x43DB5BFC, 0x74E5AB31, 0x08E24FA0,
0xBAD946E2, 0x770988C0, 0x7A615D6C, 0xBBE11757, 0x177B200C, 0x521F2B18, 0x3EC86A64, 0xD8760273,
0xD98A0864, 0xF12FFA06, 0x1AD2EE6B, 0xCEE3D226, 0x4A25619D, 0x1E8C94E0, 0xDB0933D7, 0xABF5AE8C,
0xA6E1E4C7, 0xB3970F85, 0x5D060C7D, 0x8AEA7157, 0x58DBEF0A, 0xECFB8504, 0xDF1CBA64, 0xA85521AB,
0x04507A33, 0xAD33170D, 0x8AAAC42D, 0x15728E5A, 0x98FA0510, 0x15D22618, 0xEA956AE5, 0x3995497C,
0x95581718, 0xDE2BCBF6, 0x6F4C52C9, 0xB5C55DF0, 0xEC07A28F, 0x9B2783A2, 0x180E8603, 0xE39E772C,
0x2E36CE3B, 0x32905E46, 0xCA18217C, 0xF1746C08, 0x4ABC9804, 0x670C354E, 0x7096966D, 0x9ED52907,
0x208552BB, 0x1C62F356, 0xDCA3AD96, 0x83655D23, 0xFD24CF5F, 0x69163FA8, 0x1C55D39A, 0x98DA4836,
0xA163BF05, 0xC2007CB8, 0xECE45B3D, 0x49286651, 0x7C4B1FE6, 0xAE9F2411, 0x5A899FA5, 0xEE386BFB,
0xF406B7ED, 0x0BFF5CB6, 0xA637ED6B, 0xF44C42E9, 0x625E7EC6, 0xE485B576, 0x6D51C245, 0x4FE1356D,
0xF25F1437, 0x302B0A6D, 0xCD3A431B, 0xEF9519B3, 0x8E3404DD, 0x514A0879, 0x3B139B22, 0x020BBEA6,
0x8A67CC74, 0x29024E08, 0x80DC1CD1, 0xC4C6628B, 0x2168C234, 0xC90FDAA2, 0xFFFFFFFF, 0xFFFFFFFF
};
#endif
/* Static BN_BigNum views over the const RFC 3526 prime tables above.
 * Initializer order mirrors the BN_BigNum field layout -- NOTE(review):
 * field meanings (sign flag, two size-related limb counts, a flags word,
 * limb pointer) are inferred from the initializers; confirm against the
 * BN_BigNum definition. The (uintptr_t) cast strips const from the shared
 * tables, so these objects must be treated strictly as read-only. */
static BN_BigNum g_bnRfc3526Prime2048 = {
    false,
    (uint32_t)sizeof(RFC3526_PRIME_2048) / sizeof(RFC3526_PRIME_2048[0]),
    (uint32_t)sizeof(RFC3526_PRIME_2048) / sizeof(RFC3526_PRIME_2048[0]),
    0,
    (BN_UINT *)(uintptr_t)RFC3526_PRIME_2048
};
static BN_BigNum g_bnRfc3526Prime3072 = {
    false,
    (uint32_t)sizeof(RFC3526_PRIME_3072) / sizeof(RFC3526_PRIME_3072[0]),
    (uint32_t)sizeof(RFC3526_PRIME_3072) / sizeof(RFC3526_PRIME_3072[0]),
    0,
    (BN_UINT *)(uintptr_t)RFC3526_PRIME_3072
};
static BN_BigNum g_bnRfc3526Prime4096 = {
    false,
    (uint32_t)sizeof(RFC3526_PRIME_4096) / sizeof(RFC3526_PRIME_4096[0]),
    (uint32_t)sizeof(RFC3526_PRIME_4096) / sizeof(RFC3526_PRIME_4096[0]),
    0,
    (BN_UINT *)(uintptr_t)RFC3526_PRIME_4096
};
/* Hand out a BigNum holding the given constant: duplicate it when the
 * caller passed NULL, otherwise copy it into the caller-provided outConst.
 * Returns NULL on allocation or copy failure. */
static BN_BigNum *GetBnConst(BN_BigNum *outConst, BN_BigNum *inConst)
{
    if (outConst == NULL) {
        return BN_Dup(inConst);
    }
    return (BN_Copy(outConst, inConst) == CRYPT_SUCCESS) ? outConst : NULL;
}
/* Fetch the RFC 3526 MODP prime of the requested bit length (2048, 3072
 * or 4096 only). The result is returned in r, or in a freshly allocated
 * BigNum when r is NULL; returns NULL for any other length or on failure. */
BN_BigNum *BN_GetRfc3526Prime(BN_BigNum *r, uint32_t len)
{
    BN_BigNum *prime = NULL;
    if (len == 2048) { /* 2048-bit MODP group */
        prime = &g_bnRfc3526Prime2048;
    } else if (len == 3072) { /* 3072-bit MODP group */
        prime = &g_bnRfc3526Prime3072;
    } else if (len == 4096) { /* 4096-bit MODP group */
        prime = &g_bnRfc3526Prime4096;
    }
    return (prime == NULL) ? NULL : GetBnConst(r, prime);
}
#endif /* HITLS_CRYPTO_BN_PRIME_RFC3526 */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_const.c | C | unknown | 10,049 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdbool.h>
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_err_internal.h"
#include "crypt_errno.h"
#include "bn_basic.h"
#include "bn_bincal.h"
#include "bn_optimizer.h"
/* Euclidean algorithm: repeatedly replace the larger operand by the
 * remainder (big = big mod small, via BN_Div) and swap the pair, until the
 * remainder reaches 1 (gcd is 1) or 0 (gcd is the other operand). The
 * caller guarantees max > min on entry; both are clobbered.
 * Fix: removed the unreachable 'return CRYPT_SUCCESS;' after the loop --
 * every exit path already returns from inside the infinite loop. */
static int32_t BnGcdDiv(BN_BigNum *r, BN_BigNum *max, BN_BigNum *min, BN_Optimizer *opt)
{
    BN_BigNum *big = max;
    BN_BigNum *small = min;
    for (;;) {
        int32_t ret = BN_Div(NULL, big, big, small, opt);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        if (BN_IsOne(big)) {
            return BN_Copy(r, big);
        }
        if (BN_IsZero(big)) {
            return BN_Copy(r, small);
        }
        /* ensure that big > small in the next calculation of remainder */
        BN_BigNum *tmp = big;
        big = small;
        small = tmp;
    }
}
/* Validate BN_Gcd arguments: all pointers must be non-NULL and neither
 * operand may be zero. Also pre-extends r to the smaller operand's limb
 * count, since gcd(a, b) never exceeds min(|a|, |b|). */
int32_t BnGcdCheckInput(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, const BN_Optimizer *opt)
{
    if (a == NULL || b == NULL || r == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    /* Reserve space for the result before doing any computation. */
    uint32_t minSize = (a->size < b->size) ? a->size : b->size;
    int32_t ret = BnExtend(r, minSize);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    /* a and b cannot be 0 */
    if (BN_IsZero(a) || BN_IsZero(b)) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_GCD_NO_ZERO);
        return CRYPT_BN_ERR_GCD_NO_ZERO;
    }
    return CRYPT_SUCCESS;
}
/* r = gcd(a, b), computed with the Euclidean algorithm (BnGcdDiv).
 * The inputs are copied into optimizer-managed temporaries, so a and b are
 * left untouched; the result sign is forced positive. Both operands must
 * be non-zero (see BnGcdCheckInput). */
int32_t BN_Gcd(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_Optimizer *opt)
{
    int32_t ret = BnGcdCheckInput(r, a, b, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = BinCmp(a->data, a->size, b->data, b->size);
    if (ret == 0) { // For example, a == b is the greatest common divisor of itself
        ret = BN_Copy(r, a);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        r->sign = false; // the greatest common divisor is a positive integer
        return CRYPT_SUCCESS;
    }
    const BN_BigNum *bigNum = (ret > 0) ? a : b;
    const BN_BigNum *smallNum = (ret > 0) ? b : a;
    ret = OptimizerStart(opt); // use the optimizer
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    /* Apply for temporary space of BN objects a and b. */
    BN_BigNum *max = OptimizerGetBn(opt, bigNum->size);
    BN_BigNum *min = OptimizerGetBn(opt, smallNum->size);
    if (max == NULL || min == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    ret = BN_Copy(max, bigNum);
    if (ret != CRYPT_SUCCESS) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    ret = BN_Copy(min, smallNum);
    if (ret != CRYPT_SUCCESS) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // obtain the GCD, ensure that input parameter max > min
    ret = BnGcdDiv(r, max, min, opt);
    if (ret == CRYPT_SUCCESS) {
        r->sign = false; // The GCD is a positive integer
    }
    OptimizerEnd(opt); // release occupation from the optimizer
    return ret;
}
/* Prepare the extended-Euclid operands for BN_ModInv: a = |m| and
 * b = x mod m (always non-negative). Fails with CRYPT_BN_ERR_NO_INVERSE
 * when x mod m == 0, since then gcd(x, m) = m and no inverse exists. */
static int32_t InverseReady(BN_BigNum *a, BN_BigNum *b, const BN_BigNum *x, const BN_BigNum *m, BN_Optimizer *opt)
{
    int32_t ret = BN_Copy(a, m);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    a->sign = false;
    ret = BN_Mod(b, x, m, opt); // b must be a positive number and do not need to convert symbols.
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    if (BN_IsZero(b)) { // does not satisfy x and m interprime, so it cannot obtain the inverse module.
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_NO_INVERSE);
        return CRYPT_BN_ERR_NO_INVERSE;
    }
    return CRYPT_SUCCESS;
}
/* Extended Euclidean core for BN_ModInv. On entry x holds the modulus and
 * y holds the reduced operand (see InverseReady); both are clobbered.
 * c and d accumulate Bezout coefficients (d starts at 1, c at 0): each
 * round the quotient t and remainder are produced, then c += (-t) * d and
 * the (a, b) and (c, d) pairs are swapped. When the remainder chain
 * reaches 1, the current coefficient (possibly negative) is copied to r;
 * the caller reduces it mod m. Returns CRYPT_BN_ERR_NO_INVERSE when the
 * gcd is not 1. NOTE(review): this assumes BN_Div(t, a, a, b, opt) leaves
 * the quotient in t and the remainder in a -- confirm against BN_Div. */
static int32_t InverseCore(BN_BigNum *r, BN_BigNum *x, BN_BigNum *y, uint32_t mSize, BN_Optimizer *opt)
{
    BN_BigNum *a = x;
    BN_BigNum *b = y;
    BN_BigNum *c = OptimizerGetBn(opt, mSize); // One more bit is reserved for addition and subtraction.
    BN_BigNum *d = OptimizerGetBn(opt, mSize);
    BN_BigNum *e = OptimizerGetBn(opt, mSize * 2); // multiplication of c requires 2x space
    BN_BigNum *t = OptimizerGetBn(opt, mSize);
    if (c == NULL || d == NULL || e == NULL || t == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    (void)BN_SetBit(d, 0); // d = 1; can ignore the return value
    do {
        int32_t ret = BN_Div(t, a, a, b, opt);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        if (BN_IsZero(a)) {
            if (BN_IsOne(b)) { // b is 1
                return BN_SetLimb(r, 1); // obtains the inverse modulus value 1
            }
            break; // Failed to obtain the inverse modulus value.
        }
        t->sign = !t->sign; // negate the quotient for the coefficient update
        ret = BN_Mul(e, t, d, opt);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        ret = BN_Add(c, c, e);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        if (BN_IsOne(a)) {
            return BN_Copy(r, c); // Obtain the module inverse.
        }
        // Switch a b
        BN_BigNum *tmp = a;
        a = b;
        b = tmp;
        // Switch c d
        tmp = c;
        c = d;
        d = tmp;
    } while (true);
    BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_NO_INVERSE);
    return CRYPT_BN_ERR_NO_INVERSE;
}
/* Validate BN_ModInv arguments: all pointers must be non-NULL and neither
 * x nor m may be zero. Also pre-extends r to m->size limbs so it can hold
 * the result. */
int32_t InverseInputCheck(BN_BigNum *r, const BN_BigNum *x, const BN_BigNum *m, const BN_Optimizer *opt)
{
    if (r == NULL || x == NULL || m == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    /* a zero operand or modulus has no inverse */
    if (BN_IsZero(x) || BN_IsZero(m)) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_DIVISOR_ZERO);
        return CRYPT_BN_ERR_DIVISOR_ZERO;
    }
    return BnExtend(r, m->size);
}
/* r = x^(-1) mod m, via the extended Euclidean algorithm. The final
 * BN_Mod maps a possibly negative Bezout coefficient into [0, m).
 * Fails with CRYPT_BN_ERR_NO_INVERSE when gcd(x, m) != 1, and with
 * CRYPT_BN_ERR_DIVISOR_ZERO when x or m is zero. */
int32_t BN_ModInv(BN_BigNum *r, const BN_BigNum *x, const BN_BigNum *m, BN_Optimizer *opt)
{
    int32_t ret = InverseInputCheck(r, x, m, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    ret = OptimizerStart(opt); // use the optimizer
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_BigNum *a = OptimizerGetBn(opt, m->size);
    BN_BigNum *b = OptimizerGetBn(opt, m->size);
    BN_BigNum *t = OptimizerGetBn(opt, m->size);
    bool invalidInput = (a == NULL || b == NULL || t == NULL);
    if (invalidInput) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        ret = CRYPT_BN_OPTIMIZER_GET_FAIL;
        goto ERR;
    }
    /* Take positive numbers a and b first. */
    ret = InverseReady(a, b, x, m, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
    /* Extended Euclidean algorithm */
    ret = InverseCore(t, a, b, m->size, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
    // Prevent the negative number.
    ret = BN_Mod(r, t, m, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
ERR:
    OptimizerEnd(opt); // Release occupation from the optimizer.
    return ret;
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_gcd.c | C | unknown | 8,304 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdbool.h>
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_err_internal.h"
#include "crypt_errno.h"
#include "bn_basic.h"
#include "bn_bincal.h"
#include "bn_optimizer.h"
/**
 * @brief   Least common multiple: r = lcm(a, b) = (a / gcd(a, b)) * b.
 * @param   r   [out] result
 * @param   a   [in]  first operand
 * @param   b   [in]  second operand
 * @param   opt [in]  optimizer used by the gcd/div/mul helpers
 * @return  CRYPT_SUCCESS on success; an error code otherwise.
 */
int32_t BN_Lcm(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_Optimizer *opt)
{
    if (r == NULL || a == NULL || b == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    BN_BigNum *gcd = BN_Create(BN_Bits(r));
    if (gcd == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    int32_t ret = BN_Gcd(gcd, a, b, opt);
    if (ret == CRYPT_SUCCESS) {
        if (BN_IsOne(gcd)) {
            /* a and b are coprime: lcm is simply the product. */
            BN_Destroy(gcd);
            return BN_Mul(r, a, b, opt);
        }
        ret = BN_Div(r, NULL, a, gcd, opt); /* r = a / gcd (exact) */
        if (ret == CRYPT_SUCCESS) {
            BN_Destroy(gcd);
            return BN_Mul(r, r, b, opt);
        }
    }
    BSL_ERR_PUSH_ERROR(ret);
    BN_Destroy(gcd);
    return ret;
}
#endif /* HITLS_CRYPTO_BN */ | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_lcm.c | C | unknown | 1,728 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include <stdbool.h>
#include "securec.h"
#include "bsl_err_internal.h"
#include "bsl_sal.h"
#include "crypt_errno.h"
#include "bn_bincal.h"
#include "bn_optimizer.h"
#include "crypt_utils.h"
#include "bn_montbin.h"
// The mont contains 4 BN_UINT* fields and 2 common fields.
#define MAX_MONT_SIZE ((BITS_TO_BN_UNIT(BN_MAX_BITS) * 4 + 2) * sizeof(BN_UINT))
/* Constant-time word-wise select: dst[i] = a[i] when mask is all-ones,
 * b[i] when mask is 0. mask must be 0 or ~0; no data-dependent branches. */
static void CopyConsttime(BN_UINT *dst, const BN_UINT *a, const BN_UINT *b, uint32_t len, BN_UINT mask)
{
    const BN_UINT inv = ~mask;
    uint32_t idx = 0;
    while (idx < len) {
        dst[idx] = (a[idx] & mask) ^ (b[idx] & inv);
        idx++;
    }
}
/* Convert r out of the Montgomery domain in place: r = reduce(r) = r * R^(-1) mod m. */
static void MontDecBin(BN_UINT *r, BN_Mont *mont)
{
    uint32_t mSize = mont->mSize;
    BN_UINT *x = mont->t; /* double-width (2 * mSize) scratch owned by mont */
    BN_COPY_BYTES(x, mSize << 1, r, mSize); /* widen r into x, zero-filling the top half */
    Reduce(r, x, mont->one, mont->mod, mSize, mont->k0);
}
/* Compute the Montgomery constant k0 = -(m0^(-1)) mod 2^BN_UINT_BITS for an odd m0.
 * The loop lifts y bit by bit: after iteration i, m0 * y == 1 (mod 2^(i+1)). */
static BN_UINT Inverse(BN_UINT m0)
{
    BN_UINT x = 2; /* 2^1, tracks the bit being decided */
    BN_UINT y = 1;
    BN_UINT mask = 1; /* Mask covering the bits decided so far */
    for (uint32_t i = 1; i < BN_UINT_BITS; i++, x <<= 1) {
        BN_UINT rH, rL;
        mask = (mask << 1) | 1;
        MUL_AB(rH, rL, m0, y);
        if (x < (rL & mask)) {
            y += x; /* set bit i of y so that bit i of m0 * y becomes 0 */
        }
        (void)rH; /* only the low word matters modulo 2^BN_UINT_BITS */
    }
    return (BN_UINT)(0 - y); /* negate: y is m0^(-1), the caller needs -m0^(-1) */
}
/* Pre-computation: fill table[1..num-1] with b^1, b^2, ..., b^(num-1) in the
 * Montgomery domain, where the base b is cached in mont->b.
 * table[0] deliberately aliases table[1] (both hold b) so each entry can be
 * built as table[0] * table[i - 1]; index 0 is never multiplied into the result. */
static int32_t MontExpReady(BN_BigNum *table[], uint32_t num, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    BN_UINT *b = mont->b;
    uint32_t i;
    for (i = 1; i < num; i++) { /* Request num - 1 data blocks */
        table[i] = OptimizerGetBn(opt, mont->mSize);
        if (table[i] == NULL) {
            BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
            return CRYPT_BN_OPTIMIZER_GET_FAIL;
        }
    }
    table[0] = table[1]; /* alias: entry 0 and entry 1 share one BigNum */
    (void)memcpy_s(table[1]->data, mont->mSize * sizeof(BN_UINT), b, mont->mSize * sizeof(BN_UINT));
    for (i = 2; i < num; i++) { /* precompute num - 2 data blocks */
        int32_t ret = MontMulBin(table[i]->data, table[0]->data, table[i - 1]->data, mont, opt, consttime);
        if (ret != CRYPT_SUCCESS) {
            return ret;
        }
    }
    return CRYPT_SUCCESS;
}
/* Extract an exponent window: read up to `bits` bits of e, ending just below
 * bit position `base` (counting from bit 0), into *eLimb MSB-first.
 * Returns the number of bits actually consumed. */
static uint32_t GetELimb(const BN_UINT *e, BN_UINT *eLimb, uint32_t base, uint32_t bits)
{
    if (bits > base) { /* fewer than `bits` bits remain; they all sit in e[0] */
        (*eLimb) = e[0] & (((1u) << base) - 1);
        return base;
    }
    (*eLimb) = 0;
    for (uint32_t i = 0; i < bits; i++) {
        uint32_t bit = base - i - 1;
        uint32_t nw = bit / BN_UINT_BITS; /* shift words */
        uint32_t nb = bit % BN_UINT_BITS; /* shift bits */
        (*eLimb) <<= 1;
        (*eLimb) |= ((e[nw] >> nb) & 1);
    }
    return bits;
}
/* Pick the exponentiation window width (in bits) for an exponent of the
 * given bit length: 6 above 512 bits, down to 1 at 32 bits or fewer. */
static uint32_t GetReadySize(uint32_t bits)
{
    static const uint32_t thresholds[] = { 512, 256, 128, 64, 32 };
    uint32_t win = 6; /* widest window */
    for (uint32_t i = 0; i < sizeof(thresholds) / sizeof(thresholds[0]); i++, win--) {
        if (bits > thresholds[i]) {
            return win;
        }
    }
    return 1;
}
/* r = r ^ e mod mont: left-to-right windowed exponentiation.
 * r enters and leaves in the Montgomery domain; e spans eSize words and must be
 * non-zero. When consttime is set, the window multiply is always performed and
 * the result selected by mask, so the multiply sequence does not depend on the
 * exponent bits. */
static int32_t MontExpBin(BN_UINT *r, const BN_UINT *e, uint32_t eSize, BN_Mont *mont,
    BN_Optimizer *opt, bool consttime)
{
    BN_BigNum *table[64] = { 0 }; /* 0 -- 2^6 that is 0 -- 64 */
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    /* Cache the base in mont->b for the pre-computation. */
    (void)memcpy_s(mont->b, mont->mSize * sizeof(BN_UINT), r, mont->mSize * sizeof(BN_UINT));
    uint32_t base = BinBits(e, eSize) - 1; /* bits remaining below the implicit top bit */
    uint32_t perSize = GetReadySize(base); /* window width */
    const uint32_t readySize = 1 << perSize;
    ret = MontExpReady(table, readySize, mont, opt, consttime);
    if (ret != CRYPT_SUCCESS) {
        OptimizerEnd(opt);
        return ret;
    }
    do {
        BN_UINT eLimb;
        uint32_t bit = GetELimb(e, &eLimb, base, perSize);
        for (uint32_t i = 0; i < bit; i++) {
            ret = MontSqrBin(r, mont, opt, consttime);
            if (ret != CRYPT_SUCCESS) {
                OptimizerEnd(opt);
                return ret;
            }
        }
        if (consttime == true) {
            /* Multiply unconditionally, then keep the product only when
             * eLimb != 0; mask is all-ones in that case. */
            BN_UINT *x = mont->t;
            BN_UINT mask = ~BN_IsZeroUintConsttime(eLimb);
            ret = MontMulBin(x, r, table[eLimb]->data, mont, opt, consttime);
            if (ret != CRYPT_SUCCESS) {
                OptimizerEnd(opt);
                return ret;
            }
            CopyConsttime(r, x, r, mont->mSize, mask);
        } else if (eLimb != 0) {
            ret = MontMulBin(r, r, table[eLimb]->data, mont, opt, consttime);
            if (ret != CRYPT_SUCCESS) {
                OptimizerEnd(opt);
                return ret;
            }
        }
        base -= bit;
    } while (base != 0);
    OptimizerEnd(opt);
    return CRYPT_SUCCESS;
}
/* Validate the arguments of a Montgomery exponentiation and make sure the
 * result BigNum can hold mSize words. */
static int32_t MontParaCheck(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e, const BN_Mont *mont)
{
    bool anyNull = (r == NULL) || (a == NULL) || (e == NULL) || (mont == NULL);
    if (anyNull) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (e->sign != false) { /* negative exponents are not supported */
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_EXP_NO_NEGATIVE);
        return CRYPT_BN_ERR_EXP_NO_NEGATIVE;
    }
    return BnExtend(r, mont->mSize);
}
/* If a >= mod, return a temporary copy of a reduced mod m (allocated from opt);
 * otherwise return a itself. On failure, set *ret and return NULL. */
static const BN_BigNum *DealBaseNum(const BN_BigNum *a, BN_Mont *mont, BN_Optimizer *opt, int32_t *ret)
{
    const BN_BigNum *aTmp = a;
    if (BinCmp(a->data, a->size, mont->mod, mont->mSize) >= 0) {
        BN_BigNum *tmpval = OptimizerGetBn(opt, a->size + 2); // BinDiv need a->room >= a->size + 2
        BN_BigNum *tmpMod = OptimizerGetBn(opt, mont->mSize); // writable copy of the modulus for BinDiv
        if (tmpval == NULL || tmpMod == NULL) {
            BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
            *ret = CRYPT_BN_OPTIMIZER_GET_FAIL;
            return NULL;
        }
        *ret = BN_Copy(tmpval, a);
        if (*ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(*ret);
            return NULL;
        }
        (void)memcpy_s(tmpMod->data, mont->mSize * sizeof(BN_UINT), mont->mod, mont->mSize * sizeof(BN_UINT));
        /* Keep only the remainder: tmpval = a mod m, size refreshed by BinDiv. */
        tmpval->size = BinDiv(NULL, NULL, tmpval->data, tmpval->size, tmpMod->data, mont->mSize);
        aTmp = tmpval;
    }
    return aTmp;
}
/* If r aliases the exponent e, snapshot e's words into an optimizer temporary
 * (r is about to be overwritten with the base a). Then load a into r.
 * Returns a pointer to the exponent words, or NULL on allocation failure. */
static const BN_UINT *TmpValueHandle(BN_BigNum *r, const BN_BigNum *e, const BN_BigNum *a, BN_Optimizer *opt)
{
    const BN_UINT *expWords = e->data;
    if (r == e) {
        uint32_t words = e->size;
        BN_BigNum *snapshot = OptimizerGetBn(opt, words);
        if (snapshot == NULL) {
            return NULL;
        }
        (void)memcpy_s(snapshot->data, words * sizeof(BN_UINT), e->data, words * sizeof(BN_UINT));
        expWords = snapshot->data;
    }
    BN_COPY_BYTES(r->data, r->room, a->data, a->size);
    return expWords;
}
/* Core of r = a ^ e mod m. Handles e == 0, a == 0 and a >= mod up front,
 * then runs encode -> windowed exponentiation -> decode, with a final sign
 * fixup when the (signed) base was negative and the exponent odd. */
static int32_t MontExpCore(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e,
    BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    if ((BinBits(e->data, e->size) == 0)) {
        /* e == 0: a^0 == 1, except mod 1 where every residue is 0. */
        if (mont->mSize != 1) {
            return BN_SetLimb(r, 1);
        }
        return (mont->mod[0] == 1) ? BN_Zeroize(r) : BN_SetLimb(r, 1);
    }
    if (a->size == 0) {
        /* a == 0 (and e > 0): the result is 0. */
        return BN_Zeroize(r);
    }
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    /* if a >= mod, reduce the base first */
    const BN_BigNum *aTmp = DealBaseNum(a, mont, opt, &ret);
    if (aTmp == NULL) {
        OptimizerEnd(opt);
        return ret;
    }
    /* Snapshot e if it aliases r, then load the base into r. */
    const BN_UINT *te = TmpValueHandle(r, e, aTmp, opt);
    if (te == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    /* field conversion (into the Montgomery domain) */
    ret = MontEncBin(r->data, mont, opt, consttime);
    if (ret != CRYPT_SUCCESS) {
        OptimizerEnd(opt);
        return ret;
    }
    /* modular exponentiation */
    ret = MontExpBin(r->data, te, e->size, mont, opt, consttime);
    if (ret != CRYPT_SUCCESS) {
        OptimizerEnd(opt);
        return ret;
    }
    /* field conversion (back out of the Montgomery domain) */
    MontDecBin(r->data, mont);
    /* negative number processing: for a < 0 and e odd, map r to mod - r */
    r->size = BinFixSize(r->data, mont->mSize);
    if (aTmp->sign && ((te[0] & 0x1) == 1) && r->size != 0) {
        BinSub(r->data, mont->mod, r->data, mont->mSize);
        r->size = BinFixSize(r->data, mont->mSize);
    }
    r->sign = false;
    OptimizerEnd(opt);
    return CRYPT_SUCCESS;
}
/* Wrapper around MontExpCore that creates (and later destroys) a local
 * optimizer when the caller did not supply one. */
static int32_t MontExp(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e, BN_Mont *mont,
    BN_Optimizer *opt, bool consttime)
{
    int32_t ret = MontParaCheck(r, a, e, mont);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    BN_Optimizer *workOpt = opt;
    if (workOpt == NULL) {
        workOpt = BN_OptimizerCreate();
        if (workOpt == NULL) {
            BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
            return CRYPT_MEM_ALLOC_FAIL;
        }
    }
    ret = MontExpCore(r, a, e, mont, workOpt, consttime);
    if (opt == NULL) { /* only destroy what we created */
        BN_OptimizerDestroy(workOpt);
    }
    return ret;
}
/* r = a ^ e mod m; selects the constant-time path when either the base or
 * the exponent carries the CRYPT_BN_FLAG_CONSTTIME flag. */
int32_t BN_MontExp(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e, BN_Mont *mont, BN_Optimizer *opt)
{
    bool needConstTime = BN_IsFlag(a, CRYPT_BN_FLAG_CONSTTIME);
    if (!needConstTime) {
        needConstTime = BN_IsFlag(e, CRYPT_BN_FLAG_CONSTTIME);
    }
    return MontExp(r, a, e, mont, opt, needConstTime);
}
/* must satisfy the absolute value x < mod */
/* r = a ^ e mod m, always taking the constant-time path; intended for
 * computations involving secret values (e.g. private-key operations). */
int32_t BN_MontExpConsttime(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e, BN_Mont *mont, BN_Optimizer *opt)
{
    return MontExp(r, a, e, mont, opt, true);
}
/* Total byte size of one BN_Mont allocation for a modulus of `room` words
 * (struct header + alignment slack + all word buffers). */
static uint32_t MontSize(uint32_t room)
{
    /* Struct header plus one word of slack for AlignedPointer. */
    uint32_t size = (uint32_t)(sizeof(BN_Mont) + sizeof(BN_UINT));
    /* Requires 6 * room + 1 space. mod(1) + montRR(1) + b(1) + t(2) + one = 6.
    In addition, one more room is required when the modulus is set later. */
    size += (room * 6 + 1) * ((uint32_t)sizeof(BN_UINT));
    return size;
}
/**
 * @brief Destroy a Montgomery context: zeroize the whole allocation
 *        (modulus copies, scratch buffers, k0) before freeing it.
 * @param mont context created by BN_MontCreate; NULL is a no-op.
 */
void BN_MontDestroy(BN_Mont *mont)
{
    if (mont == NULL) {
        return;
    }
    uint32_t size = MontSize(mont->mSize); /* compute once instead of twice */
    (void)memset_s(mont, size, 0, size);
    BSL_SAL_FREE(mont);
}
/* set the modulus: initialize mod, one, k0 and montRR = R^2 mod m, where
 * R = 2^(mSize * BN_UINT_BITS). montRR is obtained by one long division of
 * 2^(2 * mSize * BN_UINT_BITS) by m. */
static void SetMod(BN_Mont *mont, const BN_BigNum *mod)
{
    uint32_t mSize = mod->size;
    (void)memcpy_s(mont->mod, mSize * sizeof(BN_UINT), mod->data, mSize * sizeof(BN_UINT));
    (void)memset_s(mont->one, mSize * 3 * sizeof(BN_UINT), 0, mSize * 3 * sizeof(BN_UINT)); /* clear one, montRR (and b) */
    mont->one[0] = 1; /* set one */
    mont->k0 = Inverse(mod->data[0]); /* k0 = -mod^(-1) mod 2^BN_UINT_BITS */
    mont->montRR[mSize * 2] = 1; /* 2^2n */
    mont->montRR[mSize * 2 + 1] = 0; /* 2 more rooms are provided to ensure the division does not exceed the limit */
    mont->montRR[mSize * 2 + 2] = 0; /* 2 more rooms are provided to ensure the division does not exceed the limit */
    // The size of the space required for calculating the montRR is 2 * mSize + 1
    (void)BinDiv(NULL, NULL, mont->montRR, 2 * mSize + 1, mont->mod, mSize);
    /* NOTE(review): mod is re-copied defensively after the division; BinDiv's
     * divisor should be untouched, so this looks redundant — confirm before removing. */
    (void)memcpy_s(mont->mod, mSize * sizeof(BN_UINT), mod->data, mSize * sizeof(BN_UINT));
}
/* create a Montgomery structure, where m is a modulo.
 * m must be odd and non-negative (rejected otherwise). The context and all of
 * its word buffers live in one allocation; the buffers are carved out of the
 * tail in the order mod | one | montRR | b | t (t is double width). */
BN_Mont *BN_MontCreate(const BN_BigNum *m)
{
    if (m == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return NULL;
    }
    if (!BN_GetBit(m, 0) || m->sign) { /* reject even or negative moduli */
        BSL_ERR_PUSH_ERROR(CRYPT_INVALID_ARG);
        return NULL;
    }
    uint32_t mSize = m->size;
    uint32_t montSize = MontSize(mSize);
    if (montSize > MAX_MONT_SIZE) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_BITS_TOO_MAX);
        return NULL;
    }
    BN_Mont *mont = BSL_SAL_Malloc(montSize);
    if (mont == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return NULL;
    }
    /* First buffer starts at the first word-aligned byte past the header. */
    BN_UINT *base = AlignedPointer((uint8_t *)mont + sizeof(BN_Mont), sizeof(BN_UINT));
    mont->mSize = mSize;
    mont->mod = base; /* mSize */
    mont->one = (base += mSize); /* mSize */
    mont->montRR = (base += mSize); /* mSize */
    mont->b = (base += mSize); /* mSize */
    mont->t = base + mSize; /* 2 * mSize */
    SetMod(mont, m);
    return mont;
}
/* r = reduce(r * r): Montgomery squaring of r in place.
 * With HITLS_CRYPTO_BN_COMBA the divide-and-conquer squaring is used and
 * consttime is forwarded to it; otherwise schoolbook BinSqr runs and
 * consttime is unused. */
int32_t MontSqrBinCore(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    uint32_t mSize = mont->mSize;
    BN_UINT *x = mont->t; /* double-width scratch for the raw square */
#ifdef HITLS_CRYPTO_BN_COMBA
    BN_BigNum *bnSpace = OptimizerGetBn(opt, SpaceSize(mSize));
    if (bnSpace == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    SqrConquer(x, r, mSize, bnSpace->data, consttime);
#else
    (void)consttime;
    BinSqr(x, mSize << 1, r, mSize);
#endif
    Reduce(r, x, mont->one, mont->mod, mSize, mont->k0);
    OptimizerEnd(opt);
    return CRYPT_SUCCESS;
}
/* reduce(a) = (a * R') mod N — Montgomery reduction (REDC).
 * x holds the 2 * mSize-word input and is consumed as scratch; m0 is the
 * constant produced by Inverse() (-m^(-1) mod 2^BN_UINT_BITS). The final
 * conditional subtraction is branch-free via a masked copy. */
void ReduceCore(BN_UINT *r, BN_UINT *x, const BN_UINT *m, uint32_t mSize, BN_UINT m0)
{
    BN_UINT carry = 0;
    uint32_t n = 0;
    /* Cyclic shift, obtain r = (x / R) mod N */
    do {
        BN_UINT q = x[n] * m0; /* q = (s[0] + x[i]) * m0 */
        BN_UINT tmp = BinMulAcc(x + n, m, mSize, q); /* (s + qm) mod m == s. Refresh s[0] to x[0] */
        /* Add carry to tmp and update carry flag. */
        tmp = tmp + carry;
        carry = (tmp < carry) ? 1 : 0;
        /* Add tmp to x[mSize + n] and update the carry flag. */
        x[mSize + n] += tmp;
        carry = (x[mSize + n] < tmp) ? 1 : carry;
        if (n + 1 == mSize) {
            break;
        }
        n++;
    } while (true);
    /* If x < 2m, the carry value is 0 or -1. */
    carry -= BinSub(r, x + mSize, m, mSize);
    /* carry == 0 keeps the subtracted r; all-ones selects x's (already < m) high half. */
    CopyConsttime(r, x + mSize, r, mSize, carry);
}
/* reduce(r * RR): convert r into the Montgomery domain in place
 * (multiplying by R^2 mod m then reducing yields r * R mod m). */
int32_t MontEncBinCore(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    uint32_t mSize = mont->mSize;
    BN_UINT *x = mont->t; /* double-width scratch for the raw product */
#ifdef HITLS_CRYPTO_BN_COMBA
    BN_BigNum *bnSpace = OptimizerGetBn(opt, SpaceSize(mSize));
    if (bnSpace == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    MulConquer(x, r, mont->montRR, mSize, bnSpace->data, consttime);
#else
    (void)consttime;
    BinMul(x, mSize << 1, r, mSize, mont->montRR, mSize);
#endif
    Reduce(r, x, mont->one, mont->mod, mSize, mont->k0);
    OptimizerEnd(opt);
    return CRYPT_SUCCESS;
}
/* reduce(a * b): Montgomery multiplication; a, b and r are mSize words.
 * r may alias a or b because the raw product goes through the scratch x. */
int32_t MontMulBinCore(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    uint32_t mSize = mont->mSize;
    BN_UINT *x = mont->t; /* double-width scratch for the raw product */
#ifdef HITLS_CRYPTO_BN_COMBA
    uint32_t size = SpaceSize(mSize);
    BN_BigNum *bnSpace = OptimizerGetBn(opt, size);
    if (bnSpace == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    MulConquer(x, a, b, mSize, bnSpace->data, consttime);
#else
    (void)consttime;
    BinMul(x, mSize << 1, a, mSize, b, mSize);
#endif
    Reduce(r, x, mont->one, mont->mod, mSize, mont->k0);
    OptimizerEnd(opt);
    return CRYPT_SUCCESS;
}
#ifdef HITLS_CRYPTO_DSA
/* Seed r for the double exponentiation: when both exponents have their top
 * bit at the same position, start from a1 * a2 (Montgomery product of the two
 * table bases); otherwise copy the base whose exponent is longer. */
static int32_t GetFirstData(BN_UINT *r, uint32_t base1, uint32_t base2,
    BN_BigNum *table1[], BN_BigNum *table2[], BN_Mont *mont,
    BN_Optimizer *opt)
{
    if (base1 == base2) {
        return MontMulBin(r, table1[0]->data, table2[0]->data, mont, opt, false);
    }
    uint32_t bytes = mont->mSize * sizeof(BN_UINT);
    const BN_UINT *longer = (base1 > base2) ? table1[0]->data : table2[0]->data;
    (void)memcpy_s(r, bytes, longer, bytes);
    return CRYPT_SUCCESS;
}
/* Precalculate odd powers of the base. The data in the table is b^1, b^3, b^5...b^(2*num - 1),
 * all in the Montgomery domain; the base b is taken from mont->b. table[0]
 * temporarily holds b^2 while building and is restored to b^1 at the end. */
static int32_t MontExpOddReady(BN_BigNum *table[], uint32_t num, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    BN_UINT *b = mont->b;
    uint32_t i;
    for (i = 0; i < num; i++) { /* Request num data blocks */
        table[i] = OptimizerGetBn(opt, mont->mSize);
        if (table[i] == NULL) {
            BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
            return CRYPT_BN_OPTIMIZER_GET_FAIL;
        }
    }
    (void)memcpy_s(table[0]->data, mont->mSize * sizeof(BN_UINT), b, mont->mSize * sizeof(BN_UINT));
    if (num == 1) {
        // When num is 1, pre-computation is not need.
        return CRYPT_SUCCESS;
    }
    int32_t ret = MontSqrBin(table[0]->data, // b^2
        mont, opt, consttime);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = MontMulBin(table[1]->data, table[0]->data, mont->b, // b^3
        mont, opt, consttime);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    for (i = 2; i < num; i++) { /* precompute num - 2 data blocks */
        // b^(2*i + 1)
        ret = MontMulBin(table[i]->data, table[0]->data, table[i - 1]->data, mont, opt, consttime);
        if (ret != CRYPT_SUCCESS) {
            return ret;
        }
    }
    (void)memcpy_s(table[0]->data, mont->mSize * sizeof(BN_UINT), b, mont->mSize * sizeof(BN_UINT));
    return CRYPT_SUCCESS;
}
// Obtain the data with the length of bits from the start position of the base to the eLimb,
// ignore the high-order 0 data, and obtain an odd number or 0.
// Returns the number of exponent bits consumed (leading zeros plus window bits,
// minus any trailing zeros shifted back out of the window).
uint32_t GetOddLimbBin(const BN_UINT *e, BN_UINT *eLimb, uint32_t base, uint32_t bits, uint32_t size)
{
    (*eLimb) = 0;
    if (base == 0) {
        return 0;
    }
    uint32_t loc = base;
    uint32_t retBits = 0;
    // Offset from current. Check whether non-zero data exists.
    while (true) {
        loc--;
        uint32_t nw = loc / BN_UINT_BITS; /* shift words */
        uint32_t nb = loc % BN_UINT_BITS; /* shift retBits */
        if (nw < size && ((e[nw] >> nb) & 1) != 0) {
            // Exit the loop when the bit is 1.
            break;
        }
        retBits++;
        if (loc == 0) {
            // If no valid bit is encountered until the end, the subsequent bits are returned.
            return retBits;
        }
    }
    // Obtain valid data from the loc location.
    for (uint32_t i = 0; i < bits; i++) {
        uint32_t nw = loc / BN_UINT_BITS; /* shift words */
        uint32_t nb = loc % BN_UINT_BITS; /* shift retBits */
        (*eLimb) <<= 1;
        (*eLimb) |= ((e[nw] >> nb) & 1);
        retBits++;
        if (loc == 0) {
            // The remaining data is insufficient and the system exits early.
            break;
        }
        loc--;
    }
    // The data must be 0 or an odd number.
    while ((*eLimb) != 0 && ((*eLimb) & 1) == 0) {
        // If eLimb is not 0 and is an even number, shift the eLimb to right.
        (*eLimb) >>= 1;
        retBits--;
    }
    return retBits;
}
/* r = (a1 ^ e1) * (a2 ^ e2) mod mont: interleaved sliding-window double
 * exponentiation sharing the squarings between the two exponents.
 * Not constant-time (consttime is forced false); compiled only for DSA per
 * the enclosing #ifdef. All values are in the Montgomery domain. */
static int32_t MontExpMul(BN_UINT *r, const BN_BigNum *a1, const BN_BigNum *e1,
    const BN_BigNum *a2, const BN_BigNum *e2, BN_Mont *mont, BN_Optimizer *opt)
{
    bool consttime = false;
    BN_UINT eLimb1, eLimb2;
    uint32_t bit1 = 0;
    uint32_t bit2 = 0;
    // The window retains only the values whose exponent is an odd number, reduce storage in half.
    BN_BigNum *table1[32] = { 0 }; /* 0 -- (2^6 >> 1), that is 0 -- 32 */
    BN_BigNum *table2[32] = { 0 }; /* 0 -- (2^6 >> 1), that is 0 -- 32 */
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    uint32_t base1 = BinBits(e1->data, e1->size);
    uint32_t base2 = BinBits(e2->data, e2->size);
    uint32_t base = (base1 > base2) ? base1 : base2;
    uint32_t perSize1 = GetReadySize(base1);
    uint32_t perSize2 = GetReadySize(base2);
    const uint32_t readySize1 = 1 << (perSize1 - 1);
    const uint32_t readySize2 = 1 << (perSize2 - 1);
    // Generate the pre-computation table (odd powers of each base).
    (void)memcpy_s(mont->b, mont->mSize * sizeof(BN_UINT), a1->data, mont->mSize * sizeof(BN_UINT));
    GOTO_ERR_IF(MontExpOddReady(table1, readySize1, mont, opt, consttime), ret);
    (void)memcpy_s(mont->b, mont->mSize * sizeof(BN_UINT), a2->data, mont->mSize * sizeof(BN_UINT));
    GOTO_ERR_IF(MontExpOddReady(table2, readySize2, mont, opt, consttime), ret);
    // Obtain the first data (seed r from the top bit(s)).
    GOTO_ERR_IF(GetFirstData(r, base1, base2, table1, table2, mont, opt), ret);
    base--;
    while (base != 0) {
        /* Refill a window only once the previous one is fully consumed. */
        bit1 = (bit1 == 0) ? GetOddLimbBin(e1->data, &eLimb1, base, perSize1, e1->size) : bit1;
        bit2 = (bit2 == 0) ? GetOddLimbBin(e2->data, &eLimb2, base, perSize2, e2->size) : bit2;
        uint32_t bit = (bit1 < bit2) ? bit1 : bit2; /* shared squaring count */
        for (uint32_t i = 0; i < bit; i++) {
            GOTO_ERR_IF(MontSqrBin(r, mont, opt, consttime), ret);
        }
        /* Odd window value w lives at table[(w - 1) / 2]. */
        if (bit == bit1 && eLimb1 != 0) {
            GOTO_ERR_IF(MontMulBin(r, r, table1[(eLimb1 - 1) >> 1]->data, mont, opt, consttime), ret);
        }
        if (bit == bit2 && eLimb2 != 0) {
            GOTO_ERR_IF(MontMulBin(r, r, table2[(eLimb2 - 1) >> 1]->data, mont, opt, consttime), ret);
        }
        bit1 -= bit;
        bit2 -= bit;
        base -= bit;
    } /* stray ';' that followed this loop removed */
ERR:
    OptimizerEnd(opt);
    return ret;
}
/* Validate the inputs of BN_MontExpMul and make sure r can hold mSize words. */
static int32_t MontExpMulParaCheck(BN_BigNum *r, const BN_BigNum *a1,
    const BN_BigNum *e1, const BN_BigNum *a2, const BN_BigNum *e2, const BN_Mont *mont,
    const BN_Optimizer *opt)
{
    bool anyNull = (r == NULL || a1 == NULL || e1 == NULL || a2 == NULL ||
        e2 == NULL || mont == NULL || opt == NULL);
    if (anyNull) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    bool negativeExp = (e1->sign != false) || (e2->sign != false);
    if (negativeExp) { /* negative exponents are not supported */
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_EXP_NO_NEGATIVE);
        return CRYPT_BN_ERR_EXP_NO_NEGATIVE;
    }
    return BnExtend(r, mont->mSize);
}
/* Bundles the two bases and two exponents of the double exponentiation
 * (a1 ^ e1) * (a2 ^ e2) so they can be passed and copied as one unit. */
typedef struct {
    BN_BigNum *a1; /* first base */
    BN_BigNum *a2; /* second base */
    BN_BigNum *e1; /* exponent applied to a1 */
    BN_BigNum *e2; /* exponent applied to a2 */
} MontsMulFactor;
/* Allocate four mSize-word temporaries from the optimizer and copy
 * src's (a1, a2, e1, e2) into them; returns the first failing status. */
static int32_t MontsFactorGetByOptThenCopy(MontsMulFactor *dst, const MontsMulFactor *src,
    uint32_t mSize, BN_Optimizer *opt)
{
    dst->a1 = OptimizerGetBn(opt, mSize);
    dst->a2 = OptimizerGetBn(opt, mSize);
    dst->e1 = OptimizerGetBn(opt, mSize);
    dst->e2 = OptimizerGetBn(opt, mSize);
    if (dst->a1 == NULL || dst->a2 == NULL || dst->e1 == NULL || dst->e2 == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    BN_BigNum *to[4] = { dst->a1, dst->a2, dst->e1, dst->e2 };
    const BN_BigNum *from[4] = { src->a1, src->a2, src->e1, src->e2 };
    int32_t ret = CRYPT_SUCCESS;
    for (uint32_t i = 0; i < 4 && ret == CRYPT_SUCCESS; i++) { /* 4 fields to copy */
        ret = BN_Copy(to[i], from[i]);
    }
    return ret;
}
/* r = (a1 ^ e1) * (a2 ^ e2) mod mont.
 * Both bases must already be < mod (rejected otherwise). Zero bases/exponents
 * are handled up front; otherwise the inputs are copied so they are never
 * modified, encoded into the Montgomery domain, combined by the interleaved
 * double exponentiation, and decoded back. Not constant-time. */
int32_t BN_MontExpMul(BN_BigNum *r, const BN_BigNum *a1, const BN_BigNum *e1,
    const BN_BigNum *a2, const BN_BigNum *e2, BN_Mont *mont, BN_Optimizer *opt)
{
    int32_t ret = MontExpMulParaCheck(r, a1, e1, a2, e2, mont, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (BinCmp(a2->data, a2->size, mont->mod, mont->mSize) >= 0 ||
        BinCmp(a1->data, a1->size, mont->mod, mont->mSize) >= 0) {
        /* a1 >= mod || a2 >= mod */
        BSL_ERR_PUSH_ERROR(CRYPT_BN_MONT_BASE_TOO_MAX);
        return CRYPT_BN_MONT_BASE_TOO_MAX;
    }
    if (BN_IsZero(a1) || BN_IsZero(a2)) {
        return BN_Zeroize(r);
    }
    if (BN_IsZero(e1)) {
        /* a1^0 == 1: degenerates to a single exponentiation. */
        return MontExpCore(r, a2, e2, mont, opt, false);
    }
    if (BN_IsZero(e2)) {
        return MontExpCore(r, a1, e1, mont, opt, false);
    }
    ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    MontsMulFactor factor;
    /* Work on copies so the const inputs are never modified in place. */
    const MontsMulFactor srcFactor = {(BN_BigNum *)(uintptr_t)a1, (BN_BigNum *)(uintptr_t)a2,
        (BN_BigNum *)(uintptr_t)e1, (BN_BigNum *)(uintptr_t)e2};
    GOTO_ERR_IF_EX(MontsFactorGetByOptThenCopy(&factor, &srcFactor, mont->mSize, opt), ret);
    /* field conversion */
    GOTO_ERR_IF(MontEncBin(factor.a1->data, mont, opt, false), ret);
    GOTO_ERR_IF(MontEncBin(factor.a2->data, mont, opt, false), ret);
    /* modular exponentiation */
    GOTO_ERR_IF_EX(MontExpMul(r->data, factor.a1, factor.e1, factor.a2, factor.e2, mont, opt), ret);
    /* field conversion */
    MontDecBin(r->data, mont);
    r->size = BinFixSize(r->data, mont->mSize);
    r->sign = false;
ERR:
    OptimizerEnd(opt);
    return ret;
}
#endif
#if defined(HITLS_CRYPTO_RSA)
/* r = a * b mod m via the Montgomery domain (encode both, multiply, decode).
 * NOTE(review): t1 is taken from opt without a local OptimizerStart/End pair —
 * presumably the caller owns the optimizer frame; confirm at call sites.
 * `ret` is assigned by every GOTO_ERR_IF below before it is read. */
int32_t MontMulCore(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_Mont *mont, BN_Optimizer *opt)
{
    int32_t ret;
    BN_BigNum *t1 = OptimizerGetBn(opt, mont->mSize);
    if (t1 == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    BN_COPY_BYTES(t1->data, mont->mSize, a->data, a->size);
    BN_COPY_BYTES(r->data, mont->mSize, b->data, b->size);
    GOTO_ERR_IF(MontEncBin(t1->data, mont, opt, false), ret);
    GOTO_ERR_IF(MontEncBin(r->data, mont, opt, false), ret);
    GOTO_ERR_IF(MontMulBin(r->data, t1->data, r->data, mont, opt, false), ret);
    MontDecBin(r->data, mont);
    r->size = BinFixSize(r->data, mont->mSize);
ERR:
    return ret;
}
#endif // HITLS_CRYPTO_RSA
#if defined(HITLS_CRYPTO_BN_PRIME)
/* r = a^2 mod m via the Montgomery domain (encode, square, decode).
 * `ret` is assigned by every GOTO_ERR_IF below before it is read. */
int32_t MontSqrCore(BN_BigNum *r, const BN_BigNum *a, BN_Mont *mont, BN_Optimizer *opt)
{
    int32_t ret;
    BN_COPY_BYTES(r->data, mont->mSize, a->data, a->size);
    GOTO_ERR_IF(MontEncBin(r->data, mont, opt, false), ret);
    GOTO_ERR_IF(MontSqrBin(r->data, mont, opt, false), ret);
    MontDecBin(r->data, mont);
    r->size = BinFixSize(r->data, mont->mSize);
ERR:
    return ret;
}
#endif // HITLS_CRYPTO_BN_PRIME
#ifdef HITLS_CRYPTO_CURVE_MONT
/* Convert r (in place) into the Montgomery domain and refresh its size. */
int32_t BnMontEnc(BN_BigNum *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    int32_t ret; /* assigned by GOTO_ERR_IF before use */
    GOTO_ERR_IF(MontEncBin(r->data, mont, opt, consttime), ret);
    r->size = BinFixSize(r->data, mont->mSize);
ERR:
    return ret;
}
/* Convert r (in place) out of the Montgomery domain and refresh its size. */
void BnMontDec(BN_BigNum *r, BN_Mont *mont)
{
    MontDecBin(r->data, mont);
    r->size = BinFixSize(r->data, mont->mSize);
}
/* ECC prime-field squaring callback: r = a^2 within the Montgomery context
 * passed as opaque `data`. Operands stay in the Montgomery domain. */
int32_t BN_EcPrimeMontSqr(BN_BigNum *r, const BN_BigNum *a, void *data, BN_Optimizer *opt)
{
    bool badArg = (r == NULL) || (a == NULL) || (data == NULL) || (opt == NULL);
    if (badArg) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    BN_Mont *mont = (BN_Mont *)data;
    int32_t ret;
    BN_COPY_BYTES(r->data, mont->mSize, a->data, a->size);
    GOTO_ERR_IF(MontSqrBin(r->data, mont, opt, false), ret);
    r->size = BinFixSize(r->data, mont->mSize);
ERR:
    return ret;
}
/* ECC prime-field multiplication callback: r = a * b within the Montgomery
 * context passed as opaque `data`. Operands stay in the Montgomery domain. */
int32_t BN_EcPrimeMontMul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, void *data, BN_Optimizer *opt)
{
    bool badArg = (r == NULL) || (a == NULL) || (b == NULL) || (data == NULL) || (opt == NULL);
    if (badArg) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    BN_Mont *mont = (BN_Mont *)data;
    int32_t ret;
    GOTO_ERR_IF(MontMulBin(r->data, a->data, b->data, mont, opt, false), ret);
    r->size = BinFixSize(r->data, mont->mSize);
ERR:
    return ret;
}
#endif // HITLS_CRYPTO_CURVE_MONT
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_mont.c | C | unknown | 27,907 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_MONTBIN_H
#define BN_MONTBIN_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include <stdbool.h> /* declarations below take bool parameters */
#include "crypt_bn.h"
#ifdef __cplusplus
/* C++ linkage specifiers are case-sensitive: must be "C", not "c". */
extern "C" {
#endif
/* r = reduce(r * r) mod mont */
int32_t MontSqrBin(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime);
/* r = reduce(a * b) mod mont */
int32_t MontMulBin(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, BN_Mont *mont,
    BN_Optimizer *opt, bool consttime);
/* r = reduce(r * montRR) mod mont */
int32_t MontEncBin(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime);
/* r = reduce(x * 1) mod m = (x * R') mod m */
void Reduce(BN_UINT *r, BN_UINT *x, const BN_UINT *one, const BN_UINT *m, uint32_t mSize, BN_UINT m0);
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif /* BN_MONTBIN_H */
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_BN) && defined(HITLS_CRYPTO_ECC)
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_err_internal.h"
#include "crypt_errno.h"
#include "bn_bincal.h"
// Refresh the valid length of the BigNum r: clear any stale words above
// modSize, trim trailing zero words so r->size is minimal (at most modSize),
// and force the sign to positive.
static void UpdateSize(BN_BigNum *r, uint32_t modSize)
{
    // Zero any leftover high words beyond the modulus width.
    for (uint32_t i = modSize; i < r->size; i++) {
        r->data[i] = 0;
    }
    uint32_t len = modSize;
    while (len > 0 && r->data[len - 1] == 0) {
        len--;
    }
    r->size = len;
    r->sign = false;
}
/* Word counts for the NIST prime field sizes. Note: P521SIZE is defined before
 * SIZE_OF_BNUINT — legal, since macro bodies expand only at the point of use. */
#define P521SIZE SIZE_OF_BNUINT(521)
#define SIZE_OF_BNUINT(bits) (((bits) + BN_UINT_BITS - 1) / BN_UINT_BITS) // words needed for `bits` bits (ceiling division)
#if defined(HITLS_SIXTY_FOUR_BITS)
#define P224SIZE SIZE_OF_BNUINT(224)
#define P256SIZE SIZE_OF_BNUINT(256)
#define P384SIZE SIZE_OF_BNUINT(384)
BN_UINT g_modDataP224[][P224SIZE] = {
{ // 1p
0x0000000000000001UL, 0xffffffff00000000UL,
0xffffffffffffffffUL, 0x00000000ffffffffUL
},
{ // 2p
0x0000000000000002UL, 0xfffffffe00000000UL,
0xffffffffffffffffUL, 0x00000001ffffffffUL
}
};
BN_UINT g_modDataP256[][P256SIZE] = {
{ // p
0xffffffffffffffffUL, 0x00000000ffffffffUL,
0x0000000000000000UL, 0xffffffff00000001UL
},
{ // 2p
0xfffffffffffffffeUL, 0x00000001ffffffffUL,
0x0000000000000000UL, 0xfffffffe00000002UL
},
{ // 3p
0xfffffffffffffffdUL, 0x00000002ffffffffUL,
0x0000000000000000UL, 0xfffffffd00000003UL
},
{ // 4p
0xfffffffffffffffcUL, 0x00000003ffffffffUL,
0x0000000000000000UL, 0xfffffffc00000004UL
},
{ // 5p
0xfffffffffffffffbUL, 0x00000004ffffffffUL,
0x0000000000000000UL, 0xfffffffb00000005UL
},
};
#ifdef HITLS_CRYPTO_CURVE_SM2
const BN_UINT MODDATASM2P256[][P256SIZE] = {
{ // p
0xffffffffffffffffUL, 0xffffffff00000000UL,
0xffffffffffffffffUL, 0xfffffffeffffffffUL
},
{ // 2p
0xfffffffffffffffeUL, 0xfffffffe00000001UL,
0xffffffffffffffffUL, 0xfffffffdffffffffUL
},
{ // 3p
0xfffffffffffffffdUL, 0xfffffffd00000002UL,
0xffffffffffffffffUL, 0xfffffffcffffffffUL
},
{ // 4p
0xfffffffffffffffcUL, 0xfffffffc00000003UL,
0xffffffffffffffffUL, 0xfffffffbffffffffUL
},
{ // 5p
0xfffffffffffffffbUL, 0xfffffffb00000004UL,
0xffffffffffffffffUL, 0xfffffffaffffffffUL
},
{ // 6p
0xfffffffffffffffaUL, 0xfffffffa00000005UL,
0xffffffffffffffffUL, 0xfffffff9ffffffffUL
},
{ // 7p
0xfffffffffffffff9UL, 0xfffffff900000006UL,
0xffffffffffffffffUL, 0xfffffff8ffffffffUL
},
{ // 8p
0xfffffffffffffff8UL, 0xfffffff800000007UL,
0xffffffffffffffffUL, 0xfffffff7ffffffffUL
},
{ // 9p
0xfffffffffffffff7UL, 0xfffffff700000008UL,
0xffffffffffffffffUL, 0xfffffff6ffffffffUL
},
{ // 10p
0xfffffffffffffff6UL, 0xfffffff600000009UL,
0xffffffffffffffffUL, 0xfffffff5ffffffffUL
},
{ // 11p
0xfffffffffffffff5UL, 0xfffffff50000000aUL,
0xffffffffffffffffUL, 0xfffffff4ffffffffUL
},
{ // 12p
0xfffffffffffffff4UL, 0xfffffff40000000bUL,
0xffffffffffffffffUL, 0xfffffff3ffffffffUL
},
{ // 13p
0xfffffffffffffff3UL, 0xfffffff30000000cUL,
0xffffffffffffffffUL, 0xfffffff2ffffffffUL
},
};
#endif
const BN_UINT MOD_DATA_P384[][P384SIZE] = {
{
0x00000000ffffffffUL, 0xffffffff00000000UL, 0xfffffffffffffffeUL,
0xffffffffffffffffUL, 0xffffffffffffffffUL, 0xffffffffffffffffUL
},
{
0x00000001fffffffeUL, 0xfffffffe00000000UL, 0xfffffffffffffffdUL,
0xffffffffffffffffUL, 0xffffffffffffffffUL, 0xffffffffffffffffUL
},
{
0x00000002fffffffdUL, 0xfffffffd00000000UL, 0xfffffffffffffffcUL,
0xffffffffffffffffUL, 0xffffffffffffffffUL, 0xffffffffffffffffUL
},
{
0x00000003fffffffcUL, 0xfffffffc00000000UL, 0xfffffffffffffffbUL,
0xffffffffffffffffUL, 0xffffffffffffffffUL, 0xffffffffffffffffUL
},
{
0x00000004fffffffbUL, 0xfffffffb00000000UL, 0xfffffffffffffffaUL,
0xffffffffffffffffUL, 0xffffffffffffffffUL, 0xffffffffffffffffUL
},
};
const BN_UINT MOD_DATA_P521[P521SIZE] = {
0xffffffffffffffffUL, 0xffffffffffffffffUL, 0xffffffffffffffffUL,
0xffffffffffffffffUL, 0xffffffffffffffffUL, 0xffffffffffffffffUL,
0xffffffffffffffffUL, 0xffffffffffffffffUL, 0x00000000000001ffUL
};
/* Fully unrolled 6-word (384-bit) addition: r = a + b; returns the outgoing
 * carry (0 or 1). n is unused — presumably kept so the signature matches the
 * generic word-array add (TODO confirm against the callers). */
static BN_UINT NistP384Add(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n)
{
    (void)n;
    BN_UINT carry = 0;
    ADD_ABC(carry, r[0], a[0], b[0], carry); /* offset 0 */
    ADD_ABC(carry, r[1], a[1], b[1], carry); /* offset 1 */
    ADD_ABC(carry, r[2], a[2], b[2], carry); /* offset 2 */
    ADD_ABC(carry, r[3], a[3], b[3], carry); /* offset 3 */
    ADD_ABC(carry, r[4], a[4], b[4], carry); /* offset 4 */
    ADD_ABC(carry, r[5], a[5], b[5], carry); /* offset 5 */
    return carry;
}
/*
 * P384SIZE-limb subtraction: r = a - b. The parameter n exists only to match
 * the generic sub signature and is ignored. Returns the final borrow-out.
 */
static BN_UINT NistP384Sub(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n)
{
    (void)n;
    BN_UINT borrow = 0;
    for (uint32_t idx = 0; idx < P384SIZE; idx++) {
        SUB_ABC(borrow, r[idx], a[idx], b[idx], borrow);
    }
    return borrow;
}
/**
 * Reduction item: 2^128 + 2^96 - 2^32 + 2^0
*
* Reduction list 11 10 9 8 7 6 5 4 3 2 1 0
* a12 00, 00, 00, 00, 00, 00, 00, 01, 01, 00, -1, 01,
* a13 00, 00, 00, 00, 00, 00, 01, 01, 00, -1, 01, 00,
* a14 00, 00, 00, 00, 00, 01, 01, 00, -1, 01, 00, 00,
* a15 00, 00, 00, 00, 01, 01, 00, -1, 01, 00, 00, 00,
* a16 00, 00, 00, 01, 01, 00, -1, 01, 00, 00, 00, 00,
* a17 00, 00, 01, 01, 00, -1, 01, 00, 00, 00, 00, 00,
* a18 00, 01, 01, 00, -1, 01, 00, 00, 00, 00, 00, 00,
* a19 01, 01, 00, -1, 01, 00, 00, 00, 00, 00, 00, 00,
* a20 01, 00, -1, 01, 00, 00, 00, 01, 01, 00, -1, 01,
* a21 00, -1, 01, 00, 00, 00, 01, 02, 01, -1, 00, 01,
* a22 -1, 01, 00, 00, 00, 01, 02, 01, -1, 00, 01, 00,
* a23 01, 00, 00, 00, 01, 02, 01, -2, -1, 01, 01, -1
*
* Reduction chain
* Coefficient 11 10 9 8 7 6 5 4 3 2 1 0
* 1 a23 a22 a21 a20 a19 a18 a17 a16 a15 a14 a13 a12
* 1 a20 a19 a18 a17 a16 a15 a14 a13 a12 a23 a22 a21
* 1 a19 a18 a17 a16 a15 a14 a13 a12 a20 a23 a20
* 1 a23 a22 a21 a20 a21
* 1 a23 a22
* 2 a23 a22 a21
* -1 a22 a21 a20 a19 a18 a17 a16 a15 a14 a13 a12 a23
* -1 a23 a22 a21 a20
* -1 a23 a23
*/
/*
 * Fast (Solinas-style) reduction of a 2*P384SIZE-limb value a modulo the
 * NIST P-384 prime, following the reduction chains tabulated above.
 * Writes a P384SIZE-limb value congruent to a (mod p) into r and returns the
 * accumulated net carry (> 0) or borrow (< 0) of the chain additions and
 * subtractions, which the caller must fold back in (see ModNistP384).
 * Each chain below assembles one 384-bit operand from 32-bit halves of a:
 * "aN" refers to the N-th 32-bit word, ah/al to the high/low half of a limb.
 */
int8_t ReduceNistP384(BN_UINT *r, const BN_UINT *a)
{
    BN_UINT list[P384SIZE];
    BN_UINT t[P384SIZE];
    // 0
    list[5] = a[11];  // offset 5 a23|a22 == ah[11]|al[11]
    list[4] = a[10];  // offset 4 a21|a20 == ah[10]|al[10]
    list[3] = a[9];   // offset 3 a19|a18 == ah[9]|al[9]
    list[2] = a[8];   // offset 2 a17|a16 == ah[8]|al[8]
    list[1] = a[7];   // offset 1 a15|a14 == ah[7]|al[7]
    list[0] = a[6];   // offset 0 a13|a12 == ah[6]|al[6]
    // 1
    t[5] = BN_UINT_LO_TO_HI(a[10]) | BN_UINT_HI(a[9]);  // offset 5 a20|a19 == al[10]|ah[9]
    t[4] = BN_UINT_LO_TO_HI(a[9]) | BN_UINT_HI(a[8]);   // offset 4 a18|a17 == al[9]|ah[8]
    t[3] = BN_UINT_LO_TO_HI(a[8]) | BN_UINT_HI(a[7]);   // offset 3 a16|a15 == al[8]|ah[7]
    t[2] = BN_UINT_LO_TO_HI(a[7]) | BN_UINT_HI(a[6]);   // offset 2 a14|a13 == al[7]|ah[6]
    t[1] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_HI(a[11]);  // offset 1 a12|a23 == al[6]|ah[11]
    t[0] = BN_UINT_LO_TO_HI(a[11]) | BN_UINT_HI(a[10]); // offset 0 a22|a21 == al[11]|ah[10]
    int8_t carry = (int8_t)NistP384Add(t, list, t, P384SIZE);
    // 2
    list[5] = a[9];   // offset 5 a19|a18 == ah[9]|al[9]
    list[4] = a[8];   // offset 4 a17|a16 == ah[8]|al[8]
    list[3] = a[7];   // offset 3 a15|a14 == ah[7]|al[7]
    list[2] = a[6];   // offset 2 a13|a12 == ah[6]|al[6]
    list[1] = BN_UINT_LO_TO_HI(a[10]);                     // offset 1 a20|0 == al[10]| 0
    list[0] = BN_UINT_HI_TO_HI(a[11]) | BN_UINT_LO(a[10]); // offset 0 a23|a20 == ah[11]|al[10]
    carry += (int8_t)NistP384Add(t, list, t, P384SIZE);
    // 3
    list[5] = 0;      // offset 5 0
    list[4] = 0;      // offset 4 0
    list[3] = a[11];  // offset 3 a23|a22 == ah[11]|al[11]
    list[2] = a[10];  // offset 2 a21|a20 == ah[10]|al[10]
    list[1] = BN_UINT_HI_TO_HI(a[10]); // offset 1 a21|0 == ah[10]|0
    list[0] = 0;      // offset 0 0
    carry += (int8_t)NistP384Add(t, list, t, P384SIZE);
    // 4
    list[5] = 0;      // offset 5 0
    list[4] = 0;      // offset 4 0
    list[3] = 0;      // offset 3 0
    list[2] = a[11];  // offset 2 a23|a22 == ah[11]|al[11]
    list[1] = 0;      // offset 1 0
    list[0] = 0;      // offset 0 0
    carry += (int8_t)NistP384Add(t, list, t, P384SIZE);
    // 5 (this chain has coefficient 2, hence the doubling below)
    list[5] = 0;      // offset 5 0
    list[4] = 0;      // offset 4 0
    list[3] = BN_UINT_HI(a[11]); // offset 3 0|a23 == 0|ah[11]
    list[2] = BN_UINT_LO_TO_HI(a[11]) | BN_UINT_HI(a[10]); // offset 2 a22|a21 == al[11]|ah[10]
    list[1] = 0;      // offset 1 0
    list[0] = 0;      // offset 0 0
    // double 5
    // list[3] is left-shifted by 1 bit and the most significant bit of list[2] is added.
    list[3] = (list[2] >> (BN_UINT_BITS - 1)) | (list[3] << 1);
    list[2] = list[2] << 1; // list[2] left-shifted by 1bit
    carry += (int8_t)NistP384Add(t, list, t, P384SIZE);
    // 6 (negative coefficient: subtract)
    list[5] = BN_UINT_LO_TO_HI(a[11]) | BN_UINT_HI(a[10]); // offset 5 a22|a21 == al[11]|ah[10]
    list[4] = BN_UINT_LO_TO_HI(a[10]) | BN_UINT_HI(a[9]);  // offset 4 a20|a19 == al[10]|ah[9]
    list[3] = BN_UINT_LO_TO_HI(a[9]) | BN_UINT_HI(a[8]);   // offset 3 a18|a17 == al[9]|ah[8]
    list[2] = BN_UINT_LO_TO_HI(a[8]) | BN_UINT_HI(a[7]);   // offset 2 a16|a15 == al[8]|ah[7]
    list[1] = BN_UINT_LO_TO_HI(a[7]) | BN_UINT_HI(a[6]);   // offset 1 a14|a13 == al[7]|ah[6]
    list[0] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_HI(a[11]);  // offset 0 a12|a23 == al[6]|ah[11]
    carry -= (int8_t)NistP384Sub(t, t, list, P384SIZE);
    // 7
    list[5] = 0;      // offset 5 0
    list[4] = 0;      // offset 4 0
    list[3] = 0;      // offset 3 0
    list[2] = BN_UINT_HI(a[11]); // offset 2 0|a23 == 0|ah[11]
    list[1] = BN_UINT_LO_TO_HI(a[11]) | BN_UINT_HI(a[10]); // offset 1 a22|a21 == al[11]|ah[10]
    list[0] = BN_UINT_LO_TO_HI(a[10]); // offset 0 a20|0 == al[10]|0
    carry -= (int8_t)NistP384Sub(t, t, list, P384SIZE);
    // 8
    list[5] = 0;      // offset 5 0
    list[4] = 0;      // offset 4 0
    list[3] = 0;      // offset 3 0
    list[2] = BN_UINT_HI(a[11]);       // offset 2 0|a23 == 0|ah[11]
    list[1] = BN_UINT_HI_TO_HI(a[11]); // offset 1 a23|0 == ah[11]|0
    list[0] = 0;      // offset 0
    carry -= (int8_t)NistP384Sub(t, t, list, P384SIZE);
    // Finally fold the accumulated chains onto the low half of a.
    carry += (int8_t)NistP384Add(r, t, a, P384SIZE);
    return carry;
}
// The size of a is 2*P384SIZE, and the size of r is P384SIZE
int32_t ModNistP384(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    (void)opt;
    (void)m;
    const BN_UINT *mod = MOD_DATA_P384[0];
    int8_t carry = ReduceNistP384(r->data, a->data);
    if (carry > 0) {
        // Net carry: subtract carry * p using the precomputed multiple table.
        carry = (int8_t)1 - (int8_t)BinSub(r->data, r->data, MOD_DATA_P384[carry - 1], P384SIZE);
    } else if (carry < 0) {
        // Net borrow: add back (-carry) * p. For the detailed carry/borrow
        // bookkeeping, see the explanation in ModNistP256.
        carry = (int8_t)1 - (int8_t)BinAdd(r->data, r->data, MOD_DATA_P384[-carry - 1], P384SIZE);
        carry = -carry;
    }
    // Final conditional correction bringing the result into [0, p).
    if (carry < 0) {
        BinAdd(r->data, r->data, mod, P384SIZE);
    } else if (carry > 0 || BinCmp(r->data, P384SIZE, mod, P384SIZE) >= 0) {
        BinSub(r->data, r->data, mod, P384SIZE);
    }
    UpdateSize(r, P384SIZE);
    return 0;
}
// Reduction item: 2^0
/*
 * Reduce a (2*P521SIZE-1 limbs) modulo the Mersenne prime p = 2^521 - 1:
 * x mod (2^521 - 1) == (x >> 521) + (x mod 2^521), so split a at bit 521 and
 * add the high part onto the low part. The result in r may still be >= p by
 * one subtraction; the caller (ModNistP521) performs that final correction.
 */
int8_t ReduceNistP521(BN_UINT *r, const BN_UINT *a)
{
#define P521LEFTBITS (521 % (sizeof(BN_UINT) * 8))
#define P521RIGHTBITS ((sizeof(BN_UINT) * 8) - P521LEFTBITS)
    BN_UINT t[P521SIZE];
    uint32_t base = P521SIZE - 1;
    uint32_t i;
    // t = a >> 521 (the part above bit 521); r = the low limbs of a.
    for (i = 0; i < P521SIZE - 1; i++) {
        t[i] = (a[i + base] >> P521LEFTBITS) | (a[i + base + 1] << P521RIGHTBITS);
        r[i] = a[i];
    }
    // Top limb: keep only the low 9 bits of a's limb, and the last shifted piece.
    r[i] = a[i] & (((BN_UINT)1 << (P521LEFTBITS)) - 1);
    t[i] = (a[i + base] >> P521LEFTBITS);
    BinAdd(r, t, r, P521SIZE);
    return 0;
}
// The size of a is 2*P521SIZE-1, and the size of r is P521SIZE
// Modulo the NIST P-521 prime. a holds 2*P521SIZE-1 limbs; r receives P521SIZE limbs.
int32_t ModNistP521(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    (void)m;
    (void)opt;
    const BN_UINT *prime = MOD_DATA_P521;
    (void)ReduceNistP521(r->data, a->data);
    // After folding, at most one conditional subtraction lands in [0, p).
    if (BinCmp(r->data, P521SIZE, prime, P521SIZE) >= 0) {
        BinSub(r->data, r->data, prime, P521SIZE);
    }
    UpdateSize(r, P521SIZE);
    return 0;
}
/* 4-limb subtraction rr = aa - bb; returns the final borrow-out. */
static inline int8_t P256SUB(BN_UINT *rr, const BN_UINT *aa, const BN_UINT *bb)
{
    BN_UINT borrow = 0;
    for (uint32_t idx = 0; idx < P256SIZE; idx++) {
        SUB_ABC(borrow, rr[idx], aa[idx], bb[idx], borrow);
    }
    return (int8_t)borrow;
}
/* 4-limb addition rr = aa + bb; returns the final carry-out. */
static inline int8_t P256ADD(BN_UINT *rr, const BN_UINT *aa, const BN_UINT *bb)
{
    BN_UINT carry = 0;
    for (uint32_t idx = 0; idx < P256SIZE; idx++) {
        ADD_ABC(carry, rr[idx], aa[idx], bb[idx], carry);
    }
    return (int8_t)carry;
}
/**
* NIST_P256 curve reduction calculation for parameter P
* Reduction item: 2^224 - 2^192 - 2^96 + 2^0
* ref. https://csrc.nist.gov/csrc/media/events/workshop-on-elliptic-curve-cryptography-standards/documents/papers/session6-adalier-mehmet.pdf
*
* Reduction list:
* 7 6 5 4 3 2 1 0
* a8 01, -1, 00, 00, -1, 00, 00, 01,
* a9 00, -1, 00, -1, -1, 00, 01, 01,
* a10 -1, 00, -1, -1, 00, 01, 01, 00,
* a11 -1, 00, -1, 00, 02, 01, 00, -1,
* a12 -1, 00, 00, 02, 02, 00, -1, -1,
* a13 -1, 01, 02, 02, 01, -1, -1, -1,
* a14 00, 03, 02, 01, 00, -1, -1, -1,
* a15 03, 02, 01, 00, -1, -1, -1, 00
*
* Reduction chain
* Compared with the reduce flow of the paper, we have made proper transformation,
* which can reduce the splicing of upper 32 bits and lower 32 bits.
* Coefficient 7 6 5 4 3 2 1 0
* 2 a15 a14 a13 a12 a12 0 0 0
* 2 a15 a14 a13 a11
* 1 a15 a14 a15 a14 a13 a11 a9 a8
* 1 a8 a13 a10 a10 a9
* -1 a13 a9 a11 a10 a15 a14 a15 a14
* -1 a12 a8 a10 a9 a8 a13 a14 a13
* -1 a11 a9 a15 a13 a12
* -1 a10 a12 a11
*/
/*
 * Fast reduction of a 2*P256SIZE-limb value a modulo the NIST P-256 prime,
 * following the reduction chains tabulated above. Writes a P256SIZE-limb
 * value congruent to a (mod p) into r and returns the net carry (> 0) or
 * borrow (< 0), which the caller folds back in (see ModNistP256).
 * "aN" below is the N-th 32-bit word of a; ah/al are limb halves.
 */
static int8_t ReduceNistP256(BN_UINT *r, const BN_UINT *a)
{
    BN_UINT list[P256SIZE];
    BN_UINT t[P256SIZE];
    // Reduction chain 0
    list[3] = a[7];                    // offset 3 a15|a14 == ah[7]|al[7]
    list[2] = a[6];                    // offset 2 a13|a12 == ah[6]|al[6]
    list[1] = BN_UINT_LO_TO_HI(a[6]);  // offset 1 a12|0 == al[6]|0
    list[0] = 0;                       // offset 0 0
    // Reduction chain 1
    t[3] = BN_UINT_HI(a[7]);                          // offset 3 0|a15 == 0|ah[7]
    t[2] = BN_UINT_LO_TO_HI(a[7]) | BN_UINT_HI(a[6]); // offset 2 a14|a13 == al[7]|ah[6]
    t[1] = BN_UINT_HI_TO_HI(a[5]);                    // offset 1 a11|0 == ah[5]|0
    t[0] = 0;                                         // offset 0 0
    int8_t carry = P256ADD(t, t, list);
    // Chains 0 and 1 have coefficient 2, so double the running sum.
    // carry multiplied by 2 and padded with the most significant bit of t[3]
    carry = (carry * 2) + (int8_t)(t[3] >> (BN_UINT_BITS - 1));
    t[3] = (t[3] << 1) | (t[2] >> (BN_UINT_BITS - 1)); // t[3] is shifted left by 1 bit and the MSB of t[2] is added.
    t[2] = (t[2] << 1) | (t[1] >> (BN_UINT_BITS - 1)); // t[2] is shifted left by 1 bit and the MSB of t[1] is added.
    t[1] = (t[1] << 1) | (t[0] >> (BN_UINT_BITS - 1)); // t[1] is shifted left by 1 bit and the MSB of t[0] is added.
    t[0] <<= 1;
    // 2
    list[3] = a[7];                                      // offset 3 a15|a14 == ah[7]|al[7]
    list[2] = a[7];                                      // offset 2 a15|a14 == ah[7]|al[7]
    list[1] = BN_UINT_HI_TO_HI(a[6]) | BN_UINT_HI(a[5]); // offset 1 a13|a11 == ah[6]|ah[5]
    list[0] = a[4];                                      // offset 0 a9|a8 == ah[4]|al[4]
    carry += (int8_t)P256ADD(t, t, list);
    // 3
    list[3] = BN_UINT_LO_TO_HI(a[4]) | BN_UINT_HI(a[6]); // offset 3 a8|a13 == al[4]|ah[6]
    list[2] = 0;                                         // offset 2 0
    list[1] = BN_UINT_LO(a[5]);                          // offset 1 0|a10 == 0|al[5]
    list[0] = BN_UINT_LO_TO_HI(a[5]) | BN_UINT_HI(a[4]); // offset 0 a10|a9 == al[5]|ah[4]
    carry += (int8_t)P256ADD(t, t, list);
    // 4 (negative coefficient: subtract)
    list[3] = BN_UINT_HI_TO_HI(a[6]) | BN_UINT_HI(a[4]); // offset 3 a13|a9 == ah[6]|ah[4]
    list[2] = a[5];                                      // offset 2 a11|a10 == ah[5]|al[5]
    list[1] = a[7];                                      // offset 1 a15|a14 == ah[7]|al[7]
    list[0] = a[7];                                      // offset 0 a15|a14 == ah[7]|al[7]
    carry -= (int8_t)P256SUB(t, t, list);
    // 5
    list[3] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_LO(a[4]); // offset 3 a12|a8 == al[6]|al[4]
    list[2] = BN_UINT_LO_TO_HI(a[5]) | BN_UINT_HI(a[4]); // offset 2 a10|a9 == al[5]|ah[4]
    list[1] = BN_UINT_LO_TO_HI(a[4]) | BN_UINT_HI(a[6]); // offset 1 a8|a13 == al[4]|ah[6]
    list[0] = BN_UINT_LO_TO_HI(a[7]) | BN_UINT_HI(a[6]); // offset 0 a14|a13 == al[7]|ah[6]
    carry -= (int8_t)P256SUB(t, t, list);
    // 6
    list[3] = BN_UINT_HI_TO_HI(a[5]);                    // offset 3 a11|0 == ah[5]|0
    list[2] = 0;                                         // offset 2 0
    list[1] = BN_UINT_HI_TO_HI(a[4]) | BN_UINT_HI(a[7]); // offset 1 a9|a15 == ah[4]|ah[7]
    list[0] = a[6];                                      // offset 0 a13|a12 == ah[6]|al[6]
    carry -= (int8_t)P256SUB(t, t, list);
    // 7
    list[3] = BN_UINT_LO_TO_HI(a[5]);                    // offset 3 a10|0 == al[5]|0
    list[2] = 0;                                         // offset 2 0
    list[1] = 0;                                         // offset 1 0
    list[0] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_HI(a[5]); // offset 0 a12|a11 == al[6]|ah[5]
    carry -= (int8_t)P256SUB(t, t, list);
    // Finally fold the accumulated chains onto the low half of a.
    carry += (int8_t)P256ADD(r, t, a);
    return carry;
}
// For the NIST_P256 curve, perform modulo operation on parameter P.
// The size of a is 2*P256SIZE, and the size of r is P256SIZE
int32_t ModNistP256(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    (void)opt;
    (void)m;
    const BN_UINT *mod = g_modDataP256[0];
    int8_t carry = ReduceNistP256(r->data, a->data);
    if (carry > 0) {
        // Net carry: subtract carry * p using the precomputed multiple table.
        carry = (int8_t)1 - (int8_t)P256SUB(r->data, r->data, g_modDataP256[carry - 1]);
    } else if (carry < 0) {
        /*
         * Here, we take carry < 0 as an example.
         * If carry = -3, it indicates that ReduceNistP256 needs to be borrowed three times. In this case,
         * we need to add 3 * p. It is worth noting that we have estimated 3 * p in g_modDataP256,
         * but the carry of 3 * p is not saved, which is expressed by the following formula:
         * g_modDataP256[2] = 3 * p mod 2^256, we denoted as 2 + (3 * p)_remain.
         * Actually, we need to calculate the following formula:
         * -3 + r_data + 2 + (3 * p)_remain = -1 + r_data + (3 * p)_remain
         * Obviously, -1 is a mathematical borrowing, only r_data + (3 * p)_remain is calculated in actual P256ADD.
         * Therefore, we still need to consider the carry case of P256ADD.
         * 1. r_data + (3 * p)_remain has a carry. -1 has been eliminated. We only need to consider
         *    whether r_data + (3 * p)_remain belongs to [0, p).
         * 2. r_data + (3 * p)_remain does not carry. It indicates that -1 is not eliminated. We need to add another P
         *    to eliminate -1. Considering the value of 3 * p in g_modDataP256, r_data + (3 * p)_remain + P must
         *    generate a carry, and the final result value < P.
         */
        carry = (int8_t)1 - (int8_t)P256ADD(r->data, r->data, g_modDataP256[-carry - 1]);
        carry = -carry;
    }
    // Final conditional correction bringing the result into [0, p).
    if (carry < 0) {
        P256ADD(r->data, r->data, mod);
    } else if (carry > 0 || BinCmp(r->data, P256SIZE, mod, P256SIZE) >= 0) {
        P256SUB(r->data, r->data, mod);
    }
    UpdateSize(r, P256SIZE);
    return 0;
}
/**
* NIST_P224 curve reduction calculation for parameter P
* Reduction item: 2^96 - 2^0
*
* Reduction list:
* 6 5 4 3 2 1 0
* a7 00, 00, 00, 01, 00, 00, -1
* a8 00, 00, 01, 00, 00, -1, 00
* a9 00, 01, 00, 00, -1, 00, 00
* a10 01, 00, 00, -1, 00, 00, 00
* a11 00, 00, -1, 01, 00, 00, -1
* a12 00, -1, 01, 00, 00, -1, 00
* a13 -1, 01, 00, 00, -1, 00, 00
*
* Reduction chain
* Coefficient 6 5 4 3 2 1 0
* 1 a10 a9 a8 a7
* 1 a13 a12 a11
* -1 a13 a12 a11 a10 a9 a8 a7
* -1 a13 a12 a11
*/
/*
 * Fast reduction of a 2*P224SIZE-1-limb value a modulo the NIST P-224 prime,
 * following the reduction chains tabulated above. The 4-limb P256ADD/P256SUB
 * helpers are reused because P224SIZE == P256SIZE on 64-bit builds.
 * Any net carry/borrow ends up in the upper half of r[3]; the caller
 * (ModNistP224) extracts and folds it back in.
 */
static int8_t ReduceNistP224(BN_UINT *r, const BN_UINT *a)
{
    BN_UINT list[P224SIZE];
    BN_UINT t[P224SIZE];
    // 1
    list[3] = BN_UINT_LO(a[5]);        // offset 3 0|a10 == 0|al[5]
    list[2] = a[4];                    // offset 2 a9|a8 == ah[4]|al[4]
    list[1] = BN_UINT_HI_TO_HI(a[3]);  // offset 1 a7|0 == ah[3]|0
    list[0] = 0;                       // offset 0 0
    // 2
    t[3] = 0;                          // offset 3 0
    t[2] = a[6];                       // offset 2 a13|a12 == ah[6]|al[6]
    t[1] = BN_UINT_HI_TO_HI(a[5]);     // offset 1 a11|0 == ah[5]|0
    t[0] = 0;                          // offset 0 0
    P256ADD(t, t, list);
    // 3 (negative coefficient: subtract)
    list[3] = BN_UINT_HI(a[6]);                          // offset 3 0|a13 == 0|ah[6]
    list[2] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_HI(a[5]); // offset 2 a12|a11 == al[6]|ah[5]
    list[1] = BN_UINT_LO_TO_HI(a[5]) | BN_UINT_HI(a[4]); // offset 1 a10|a9 == al[5]|ah[4]
    list[0] = BN_UINT_LO_TO_HI(a[4]) | BN_UINT_HI(a[3]); // offset 0 a8|a7 == al[4]|ah[3]
    P256SUB(t, t, list);
    // 4
    list[3] = 0;                                         // offset 3 0
    list[2] = 0;                                         // offset 2 0
    list[1] = BN_UINT_HI(a[6]);                          // offset 1 0|a13 == 0|ah[6]
    list[0] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_HI(a[5]); // offset 0 a12|a11 == al[6]|ah[5]
    P256SUB(t, t, list);
    // Fold the chains onto the low 224 bits of a.
    r[3] = BN_UINT_LO(a[3]); // Take lower 32 bits of a[3]
    r[2] = a[2];             // Take a[2]
    r[1] = a[1];
    r[0] = a[0];
    P256ADD(r, r, t);
    return 0;
}
// NIST_P224 curve reduction calculation for parameter P. The size of a is 2*P224SIZE-1, and the size of r is P224SIZE
int32_t ModNistP224(
    BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    (void)opt;
    (void)m;
    const BN_UINT *mod = g_modDataP224[0];
    ReduceNistP224(r->data, a->data);
    // Obtain the high-order data of r[3] as carry information
    // (bits above bit 224 of the reduced value, interpreted as a signed count).
    int8_t carry = (int8_t)((uint8_t)(BN_UINT_HI(r->data[3]) & 0xFF));
    if (carry > 0) {
        // Net carry: subtract carry * p using the precomputed multiple table.
        (void)P256SUB(r->data, r->data, g_modDataP224[carry - 1]);
    } else if (carry < 0) {
        // Net borrow: add back (-carry) * p.
        (void)P256ADD(r->data, r->data, g_modDataP224[-carry - 1]);
    }
    // Obtain the high-order data of r[3] as carry information
    carry = (int8_t)((uint8_t)(BN_UINT_HI(r->data[3]) & 0xFF));
    if (carry < 0) {
        P256ADD(r->data, r->data, mod);
    } else if (carry > 0 || BinCmp(r->data, P256SIZE, mod, P256SIZE) >= 0) {
        P256SUB(r->data, r->data, mod);
    }
    UpdateSize(r, P224SIZE);
    return 0;
}
/**
* Reduction item: 2^224 + 2^96 - 2^64 + 2^0
* 7 6 5 4 3 2 1 0
* a8 01, 00, 00, 00, 01, -1, 00, 01,
* a9 01, 00, 00, 01, 00, -1, 01, 01,
* a10 01, 00, 01, 00, 00, 00, 01, 01,
* a11 01, 01, 00, 00, 01, 00, 01, 01,
* a12 02, 00, 00, 01, 01, 00, 01, 01,
* a13 02, 00, 01, 01, 02, -1, 01, 02,
* a14 02, 01, 01, 02, 01, -1, 02, 02,
* a15 03, 01, 02, 01, 01, 00, 02, 02,
* Reduction chain
* The last two reduction chain can be combined into the third to last chain for calculation.
* Coefficient 7 6 5 4 3 2 1 0
* 2 a15 a14 a15 a14 a13 0 a15 a14
* 2 a14 0 0 0 0 0 a14 a13
* 2 a13 0 a13 a12 a11 0 a12 a11
* 2 a12 a11 a10 a9 0 0 a9 a8
* 1 a15 0 0 a15 a14 0 a13 a12
* 1 a11 0 0 0 a8 0 0 a15
* 1 a10 0 0 0 a15 0 a11 a10
* 1 a9 a10 a9
* 1 a8 a15 a14 a13 a12 a15
* -1 a14 a13 a12 a11 a13 a12 a11
* -1 a11 a10 a9 0 a14 a9 a8
* -1 a8
* -1 a9
*/
#ifdef HITLS_CRYPTO_CURVE_SM2
/*
 * Fast reduction of a 2*P256SIZE-limb value a modulo the SM2 curve prime,
 * following the reduction chains tabulated above. Writes a P256SIZE-limb
 * value congruent to a (mod p) into r and returns the net carry/borrow,
 * which the caller folds back in (see ModSm2P256).
 * "aN" below is the N-th 32-bit word of a; ah/al are limb halves.
 */
static int8_t ReduceSm2P256(BN_UINT *r, const BN_UINT *a)
{
    BN_UINT list[P256SIZE];
    BN_UINT t[P256SIZE];
    // Reduction chain 0, Coefficient 2
    list[3] = a[7];                   // offset 3 a15|a14 == ah[7]|al[7]
    list[2] = a[7];                   // offset 2 a15|a14 == ah[7]|al[7]
    list[1] = BN_UINT_HI_TO_HI(a[6]); // offset 1 a13|0 == ah[6]|0
    list[0] = a[7];                   // offset 0 a15|a14 == ah[7]|al[7]
    // Reduction chain 1, Coefficient 2
    t[3] = BN_UINT_LO_TO_HI(a[7]);                    // offset 3 a14|0 == al[7]|0
    t[2] = 0;                                         // offset 2 0
    t[1] = 0;                                         // offset 1 0
    t[0] = BN_UINT_LO_TO_HI(a[7]) | BN_UINT_HI(a[6]); // offset 0 a14|a13 = al[7]|ah[6]
    int8_t carry = P256ADD(t, t, list);
    // Reduction chain 2, Coefficient 2
    list[3] = BN_UINT_HI_TO_HI(a[6]);                    // offset 3 a13|0 == ah[6]|0
    list[2] = a[6];                                      // offset 2 a13|a12 == ah[6]|al[6]
    list[1] = BN_UINT_HI_TO_HI(a[5]);                    // offset 1 a11|0 == ah[5]|0
    list[0] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_HI(a[5]); // offset 0 a12|a11 == al[6]|ah[5]
    carry += (int8_t)P256ADD(t, t, list);
    // Reduction chain 3, Coefficient 2
    list[3] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_HI(a[5]); // offset 3 a12|a11 == al[6]|ah[5]
    list[2] = BN_UINT_LO_TO_HI(a[5]) | BN_UINT_HI(a[4]); // offset 2 a10|a9 == al[5]|ah[4]
    list[1] = 0;                                         // offset 1 0
    list[0] = a[4];                                      // offset 0 a9|a8 == ah[4]|al[4]
    carry += (int8_t)P256ADD(t, t, list);
    // Chains 0-3 all have coefficient 2, so double the running sum.
    // carry multiplied by 2 and padded with the most significant bit of t[3]
    carry = (carry * 2) + (int8_t)(t[3] >> (BN_UINT_BITS - 1));
    t[3] = (t[3] << 1) | (t[2] >> (BN_UINT_BITS - 1)); // t[3] is shifted left by 1 bit and the MSB of t[2] is added.
    t[2] = (t[2] << 1) | (t[1] >> (BN_UINT_BITS - 1)); // t[2] is shifted left by 1 bit and the MSB of t[1] is added.
    t[1] = (t[1] << 1) | (t[0] >> (BN_UINT_BITS - 1)); // t[1] is shifted left by 1 bit and the MSB of t[0] is added.
    t[0] <<= 1;
    // Reduction chain 4, Coefficient 1
    list[3] = BN_UINT_HI_TO_HI(a[7]); // offset 3 a15|0 == ah[7]|0
    list[2] = BN_UINT_HI(a[7]);       // offset 2 0|a15 == 0|ah[7]
    list[1] = BN_UINT_LO_TO_HI(a[7]); // offset 1 a14|0 == al[7]|0
    list[0] = a[6];                   // offset 0 a13|a12 == ah[6]|al[6]
    carry += (int8_t)P256ADD(t, t, list);
    // Reduction chain 5, Coefficient 1
    list[3] = BN_UINT_HI_TO_HI(a[5]); // offset 3 a11|0 == ah[5]|0
    list[2] = 0;                      // offset 2 0
    list[1] = BN_UINT_LO_TO_HI(a[4]); // offset 1 a8|0 == al[4]|0
    list[0] = BN_UINT_HI(a[7]);       // offset 0 0|a15 == 0|ah[7]
    carry += (int8_t)P256ADD(t, t, list);
    // Reduction chain 6, Coefficient 1
    list[3] = BN_UINT_LO_TO_HI(a[5]); // offset 3 a10|0 == al[5]|0
    list[2] = 0;                      // offset 2 0
    list[1] = BN_UINT_HI_TO_HI(a[7]); // offset 1 a15|0 == ah[7]|0
    list[0] = a[5];                   // offset 0 a11|a10 == ah[5]|al[5]
    carry += (int8_t)P256ADD(t, t, list);
    // Reduction chain 7, Coefficient 1
    list[3] = BN_UINT_HI_TO_HI(a[4]);                    // offset 3 a9|0 == ah[4]|0
    list[2] = 0;                                         // offset 2 0
    list[1] = 0;                                         // offset 1 0
    list[0] = BN_UINT_LO_TO_HI(a[5]) | BN_UINT_HI(a[4]); // offset 0 a10|a9 == al[5]|ah[4]
    carry += (int8_t)P256ADD(t, t, list);
    // Reduction chain 8, Coefficient 1
    list[3] = BN_UINT_LO_TO_HI(a[4]) | BN_UINT_HI(a[7]); // offset 3 a8|a15 == al[4]|ah[7]
    list[2] = BN_UINT_LO_TO_HI(a[7]) | BN_UINT_HI(a[6]); // offset 2 a14|a13 = al[7]|ah[6]
    list[1] = BN_UINT_LO_TO_HI(a[6]);                    // offset 1 a12|0 == al[6]|0
    list[0] = BN_UINT_HI(a[7]);                          // offset 0 0|a15 == 0|ah[7]
    carry += (int8_t)P256ADD(t, t, list);
    // Reduction chain 9, Coefficient -1
    list[3] = BN_UINT_LO(a[7]);                          // offset 3 0|a14 == 0|al[7]
    list[2] = a[6];                                      // offset 2 a13|a12 == ah[6]|al[6]
    list[1] = BN_UINT_HI_TO_HI(a[5]) | BN_UINT_HI(a[6]); // offset 1 a11|a13 == ah[5]|ah[6]
    list[0] = BN_UINT_LO_TO_HI(a[6]) | BN_UINT_HI(a[5]); // offset 0 a12|a11 == al[6]|ah[5]
    carry -= (int8_t)P256SUB(t, t, list);
    // Reduction chain 10, Coefficient -1
    list[3] = BN_UINT_HI(a[5]);                          // offset 3 0|a11 == 0|ah[5]
    list[2] = BN_UINT_LO_TO_HI(a[5]) | BN_UINT_HI(a[4]); // offset 2 a10|a9 == al[5]|ah[4]
    // offset 1 0|a14 == 0|al[7]. Add the values of the last two chains.
    list[1] = BN_UINT_LO(a[7]) + BN_UINT_HI(a[4]) + BN_UINT_LO(a[4]);
    list[0] = a[4];                                      // offset 0 a9|a8 == ah[4]|al[4]
    carry -= (int8_t)P256SUB(t, t, list);
    // Finally fold the accumulated chains onto the low half of a.
    carry += (int8_t)P256ADD(r, t, a);
    return carry;
}
// SM2_P256 curve modulo parameter P. The size of a is 2*P256SIZE, and the size of r is P256SIZE
int32_t ModSm2P256(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    (void)opt;
    (void)m;
    const BN_UINT *mod = MODDATASM2P256[0];
    int8_t carry = ReduceSm2P256(r->data, a->data);
    if (carry < 0) {
        // Net borrow: add back (-carry) * p. For the detailed carry/borrow
        // bookkeeping, see the explanation in ModNistP256.
        carry = (int8_t)1 - (int8_t)P256ADD(r->data, r->data, MODDATASM2P256[-carry - 1]);
        carry = -carry;
    } else if (carry > 0) {
        // Net carry: subtract carry * p using the precomputed multiple table.
        carry = (int8_t)1 - (int8_t)P256SUB(r->data, r->data, MODDATASM2P256[carry - 1]);
    }
    // Final conditional correction bringing the result into [0, p).
    if (carry < 0) {
        P256ADD(r->data, r->data, mod);
    } else if (carry > 0 || BinCmp(r->data, P256SIZE, mod, P256SIZE) >= 0) {
        P256SUB(r->data, r->data, mod);
    }
    UpdateSize(r, P256SIZE);
    return 0;
}
#endif
#elif defined(HITLS_THIRTY_TWO_BITS)
// 32-bit builds: no hand-optimized reduction is provided, so every
// curve-specific modulo falls back to the generic BN_Mod.
int32_t ModNistP224(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    return BN_Mod(r, a, m, opt);
}
int32_t ModNistP256(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    return BN_Mod(r, a, m, opt);
}
#ifdef HITLS_CRYPTO_CURVE_SM2
int32_t ModSm2P256(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    return BN_Mod(r, a, m, opt);
}
#endif
int32_t ModNistP384(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    return BN_Mod(r, a, m, opt);
}
int32_t ModNistP521(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    return BN_Mod(r, a, m, opt);
}
#endif
#if defined(HITLS_CRYPTO_BN_COMBA) && defined(HITLS_SIXTY_FOUR_BITS)
// 224/256-bit multiplication via the 4-limb Comba routine.
// Returns the limb count of the product with leading zero limbs stripped.
static uint32_t MulNistP256P224(BN_UINT *r, uint32_t rSize, const BN_UINT *a, uint32_t aSize,
    const BN_UINT *b, uint32_t bSize)
{
    (void)rSize;
    (void)aSize;
    (void)bSize;
    MulComba4(r, a, b);
    uint32_t len = P224SIZE << 1; // in 64-bit environment, P224SIZE = P256SIZE
    while (len > 0 && r[len - 1] == 0) {
        len--;
    }
    return len;
}
// 224/256-bit squaring via the 4-limb Comba routine.
// Returns the limb count of the square with leading zero limbs stripped.
static uint32_t SqrNistP256P224(BN_UINT *r, uint32_t rSize, const BN_UINT *a, uint32_t aSize)
{
    (void)rSize;
    (void)aSize;
    SqrComba4(r, a);
    uint32_t len = P224SIZE << 1; // in 64-bit environment, P224SIZE = P256SIZE
    while (len > 0 && r[len - 1] == 0) {
        len--;
    }
    return len;
}
// 384-bit multiplication via the 6-limb Comba routine.
// Returns the limb count of the product with leading zero limbs stripped.
static uint32_t MulNistP384(BN_UINT *r, uint32_t rSize, const BN_UINT *a, uint32_t aSize,
    const BN_UINT *b, uint32_t bSize)
{
    (void)rSize;
    (void)aSize;
    (void)bSize;
    MulComba6(r, a, b);
    uint32_t len = P384SIZE << 1;
    while (len > 0 && r[len - 1] == 0) {
        len--;
    }
    return len;
}
// 384-bit squaring via the 6-limb Comba routine.
// Returns the limb count of the square with leading zero limbs stripped.
static uint32_t SqrNistP384(BN_UINT *r, uint32_t rSize, const BN_UINT *a, uint32_t aSize)
{
    (void)rSize;
    (void)aSize;
    SqrComba6(r, a);
    uint32_t len = P384SIZE << 1;
    while (len > 0 && r[len - 1] == 0) {
        len--;
    }
    return len;
}
#else
// Without Comba acceleration (or on non-64-bit builds), fall back to the
// generic binary multiply/square primitives.
static uint32_t MulNistP256P224(BN_UINT *r, uint32_t rSize, const BN_UINT *a, uint32_t aSize,
    const BN_UINT *b, uint32_t bSize)
{
    return BinMul(r, rSize, a, aSize, b, bSize);
}
static uint32_t SqrNistP256P224(BN_UINT *r, uint32_t rSize, const BN_UINT *a, uint32_t aSize)
{
    return BinSqr(r, rSize, a, aSize);
}
static uint32_t MulNistP384(BN_UINT *r, uint32_t rSize, const BN_UINT *a, uint32_t aSize,
    const BN_UINT *b, uint32_t bSize)
{
    return BinMul(r, rSize, a, aSize, b, bSize);
}
static uint32_t SqrNistP384(BN_UINT *r, uint32_t rSize, const BN_UINT *a, uint32_t aSize)
{
    return BinSqr(r, rSize, a, aSize);
}
#endif
// Common parameter check for the quick modular add/sub helpers: rejects NULL
// inputs, guards against out-of-bounds reads, and extends r to mod->size.
static inline int32_t ModCalParaCheck(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, const BN_BigNum *mod)
{
    if (r == NULL || a == NULL || b == NULL || mod == NULL) {
        return CRYPT_NULL_INPUT;
    }
    // Ensure that no out-of-bounds access occurs.
    if ((mod->size > a->room) || (mod->size > b->room)) {
        return CRYPT_BN_SPACE_NOT_ENOUGH;
    }
    return BnExtend(r, mod->size);
}
// The user must ensure that a < m, and a->room & b->room are not less than mod->size.
// All the data must be not negative number, otherwise the API may be not functional.
int32_t BN_ModAddQuick(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
    const BN_BigNum *mod, const BN_Optimizer *opt)
{
    (void)opt;
    int32_t ret = ModCalParaCheck(r, a, b, mod);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // r = a + b; since a, b < mod, at most one subtraction of mod is needed,
    // triggered either by a carry out of mod->size limbs or by r >= mod.
    BN_UINT overflow = BinAdd(r->data, a->data, b->data, mod->size);
    if (overflow > 0 || BinCmp(r->data, mod->size, mod->data, mod->size) >= 0) {
        BinSub(r->data, r->data, mod->data, mod->size);
    }
    UpdateSize(r, mod->size);
    return CRYPT_SUCCESS;
}
// The user must ensure that a < m, and a->room & b->room are not less than mod->size.
// All the data must be not negative number, otherwise the API may be not functional.
int32_t BN_ModSubQuick(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
    const BN_BigNum *mod, const BN_Optimizer *opt)
{
    (void)opt;
    int32_t ret = ModCalParaCheck(r, a, b, mod);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // Compare magnitudes before subtracting, because r may alias a or b.
    int32_t cmp = BinCmp(a->data, a->size, b->data, b->size);
    BinSub(r->data, a->data, b->data, mod->size);
    if (cmp < 0) {
        // a < b: the raw subtraction wrapped; adding mod once corrects it.
        BinAdd(r->data, r->data, mod->data, mod->size);
    }
    UpdateSize(r, mod->size);
    return CRYPT_SUCCESS;
}
// Parameter check for the quick ECC modular multiplication: rejects NULL
// inputs, guards against out-of-bounds reads, and extends r to mod->size.
static inline int32_t ModEccMulParaCheck(BN_BigNum *r, const BN_BigNum *a,
    const BN_BigNum *b, const BN_BigNum *mod, BN_Optimizer *opt)
{
    if (r == NULL || a == NULL || b == NULL || mod == NULL || opt == NULL) {
        return CRYPT_NULL_INPUT;
    }
    // Ensure that no out-of-bounds access occurs.
    if ((mod->size > b->room) || (mod->size > a->room)) {
        return CRYPT_BN_SPACE_NOT_ENOUGH;
    }
    return BnExtend(r, mod->size);
}
// The user must ensure that a < m, and a->room & b->room are not less than mod->size.
// All the data must be not negative number, otherwise the API may be not functional.
int32_t BN_ModNistEccMul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, void *data, BN_Optimizer *opt)
{
    BN_BigNum *mod = (BN_BigNum *)data;
    int32_t ret = ModEccMulParaCheck(r, a, b, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    if (a->size == 0 || b->size == 0) {
        return BN_Zeroize(r);
    }
    // Stack-allocated product buffer, sized for the largest supported curve.
    BN_UINT prodData[P521SIZE << 1] = { 0 };
    BN_BigNum prod = {
        .data = prodData,
        .size = 0,
        .sign = false,
        .room = P521SIZE << 1
    };
    uint32_t prodRoom = mod->size << 1;
    // Dispatch on the bit width of the modulus to the curve-specific routine.
    switch (BN_Bits(mod)) {
        case 224: // NIST P-224
            prod.size = MulNistP256P224(prod.data, prodRoom, a->data, mod->size, b->data, mod->size);
            ModNistP224(r, &prod, mod, opt);
            break;
        case 256: // NIST P-256
            prod.size = MulNistP256P224(prod.data, prodRoom, a->data, mod->size, b->data, mod->size);
            ModNistP256(r, &prod, mod, opt);
            break;
        case 384: // NIST P-384
            prod.size = MulNistP384(prod.data, prodRoom, a->data, mod->size, b->data, mod->size);
            ModNistP384(r, &prod, mod, opt);
            break;
        case 521: // NIST P-521
            prod.size = BinMul(prod.data, prodRoom, a->data, mod->size, b->data, mod->size);
            ModNistP521(r, &prod, mod, opt);
            break;
        default:
            BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_QUICK_MODDATA);
            return CRYPT_BN_ERR_QUICK_MODDATA;
    }
    return CRYPT_SUCCESS;
}
// Parameter check for the quick ECC modular squaring: rejects NULL inputs,
// guards against out-of-bounds reads, and extends r to mod->size.
static int32_t ModEccSqrParaCheck(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *mod, BN_Optimizer *opt)
{
    if (r == NULL || a == NULL || mod == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    // Ensure that no out-of-bounds access occurs.
    if (mod->size > a->room) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_SPACE_NOT_ENOUGH);
        return CRYPT_BN_SPACE_NOT_ENOUGH;
    }
    return BnExtend(r, mod->size);
}
// The user must ensure that a < m, and a->room & b->room are not less than mod->size.
// All the data must be not negative number, otherwise the API may be not functional.
int32_t BN_ModNistEccSqr(BN_BigNum *r, const BN_BigNum *a, void *data, BN_Optimizer *opt)
{
    BN_BigNum *mod = (BN_BigNum *)data;
    int32_t ret = ModEccSqrParaCheck(r, a, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (a->size == 0) {
        return BN_Zeroize(r);
    }
    // Stack-allocated square buffer, sized for the largest supported curve.
    BN_UINT sqrData[P521SIZE << 1] = { 0 };
    BN_BigNum sqr = {
        .data = sqrData,
        .size = 0,
        .sign = false,
        .room = P521SIZE << 1
    };
    uint32_t sqrRoom = mod->size << 1;
    // Dispatch on the bit width of the modulus to the curve-specific routine.
    switch (BN_Bits(mod)) {
        case 224: // NIST P-224
            sqr.size = SqrNistP256P224(sqr.data, sqrRoom, a->data, mod->size);
            ModNistP224(r, &sqr, mod, opt);
            break;
        case 256: // NIST P-256
            sqr.size = SqrNistP256P224(sqr.data, sqrRoom, a->data, mod->size);
            ModNistP256(r, &sqr, mod, opt);
            break;
        case 384: // NIST P-384
            sqr.size = SqrNistP384(sqr.data, sqrRoom, a->data, mod->size);
            ModNistP384(r, &sqr, mod, opt);
            break;
        case 521: // NIST P-521
            sqr.size = BinSqr(sqr.data, sqrRoom, a->data, mod->size);
            ModNistP521(r, &sqr, mod, opt);
            break;
        default:
            BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_QUICK_MODDATA);
            return CRYPT_BN_ERR_QUICK_MODDATA;
    }
    return CRYPT_SUCCESS;
}
#ifdef HITLS_CRYPTO_CURVE_SM2
#define SM2SIZE SIZE_OF_BNUINT(256)
// The user must ensure that a < m, and a->room & b->room are not less than mod->size.
// All the data must be not negative number, otherwise the API may be not functional.
int32_t BN_ModSm2EccMul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, void *data, BN_Optimizer *opt)
{
    BN_BigNum *mod = (BN_BigNum *)data;
    int32_t ret = ModEccMulParaCheck(r, a, b, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    if (a->size == 0 || b->size == 0) {
        return BN_Zeroize(r);
    }
    // SM2's prime is 256 bits, so the 4-limb Comba multiplier applies directly.
    BN_UINT prodData[SM2SIZE << 1] = { 0 };
    BN_BigNum prod = {
        .data = prodData,
        .size = 0,
        .sign = false,
        .room = SM2SIZE << 1
    };
    prod.size = MulNistP256P224(prod.data, mod->size << 1, a->data, mod->size, b->data, mod->size);
    ModSm2P256(r, &prod, mod, opt);
    return CRYPT_SUCCESS;
}
// The user must ensure that a < m, and a->room & b->room are not less than mod->size.
// All the data must be not negative number, otherwise the API may be not functional.
int32_t BN_ModSm2EccSqr(BN_BigNum *r, const BN_BigNum *a, void *data, BN_Optimizer *opt)
{
    BN_BigNum *mod = (BN_BigNum *)data;
    int32_t ret = ModEccSqrParaCheck(r, a, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (a->size == 0) {
        return BN_Zeroize(r);
    }
    // SM2's prime is 256 bits, so the 4-limb Comba squaring applies directly.
    BN_UINT sqrData[SM2SIZE << 1] = { 0 };
    BN_BigNum sqr = {
        .data = sqrData,
        .size = 0,
        .sign = false,
        .room = SM2SIZE << 1
    };
    sqr.size = SqrNistP256P224(sqr.data, mod->size << 1, a->data, mod->size);
    ModSm2P256(r, &sqr, mod, opt);
    return CRYPT_SUCCESS;
}
#endif
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_nistmod.c | C | unknown | 45,086 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "securec.h"
#include "bsl_sal.h"
#include "bsl_err_internal.h"
#include "crypt_errno.h"
#include "crypt_utils.h"
#include "bn_basic.h"
#include "bn_bincal.h"
#include "bn_ucal.h"
#include "bn_optimizer.h"
#define SMALL_CONQUER_SIZE 8
/*
 * Three-way comparison of two signed big numbers (NULL-tolerant).
 * Returns a positive value when a > b, a negative value when a < b, and 0
 * when they are equal. A NULL operand compares greater than a non-NULL one;
 * two NULL operands compare equal.
 */
int32_t BN_Cmp(const BN_BigNum *a, const BN_BigNum *b)
{
    if (a == NULL && b == NULL) {
        return 0;
    }
    if (a == NULL) {
        return 1;   // only b is a valid object
    }
    if (b == NULL) {
        return -1;  // only a is a valid object
    }
    if (a->sign != b->sign) {
        // Mixed signs: the non-negative operand is the larger one.
        return (a->sign == false) ? 1 : -1;
    }
    // Same sign: compare magnitudes; for negative numbers the order flips.
    return (a->sign == true)
        ? BinCmp(b->data, b->size, a->data, a->size)
        : BinCmp(a->data, a->size, b->data, b->size);
}
/*
 * Signed addition: r = a + b.
 * Equal signs add magnitudes and keep the shared sign; opposite signs
 * subtract the smaller magnitude from the larger one, and the result takes
 * the sign of the operand with the larger magnitude. Cancellation gives 0.
 */
int32_t BN_Add(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b)
{
    if (r == NULL || a == NULL || b == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (a->sign == b->sign) {
        // (+a) + (+b) or (-a) + (-b): magnitudes add, sign is shared.
        r->sign = a->sign;
        return UAdd(r, a, b);
    }
    // Opposite signs: compare absolute values to decide the outcome.
    int32_t cmp = BinCmp(a->data, a->size, b->data, b->size);
    if (cmp == 0) {
        return BN_Zeroize(r);   // magnitudes cancel exactly
    }
    const BN_BigNum *larger = (cmp > 0) ? a : b;
    const BN_BigNum *smaller = (cmp > 0) ? b : a;
    r->sign = larger->sign;
    return USub(r, larger, smaller);
}
/*
 * r = a + w, where w is a single non-negative machine word.
 * Handles the sign of 'a' explicitly; the result may be shorter than,
 * equal to, or one limb longer than 'a'.
 */
int32_t BN_AddLimb(BN_BigNum *r, const BN_BigNum *a, BN_UINT w)
{
    if (r == NULL || a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (a->size == 0) {
        return BN_SetLimb(r, w); // 0 + w == w
    }
    int32_t ret;
    if (a->sign == false) { // a is positive
        // Positive + w: plain magnitude increment, possibly with carry-out.
        ret = BnExtend(r, a->size + 1);
        if (ret != CRYPT_SUCCESS) {
            return ret;
        }
        BN_UINT carry = BinInc(r->data, a->data, a->size, w);
        if (carry != 0) {
            uint32_t size = a->size;
            r->size = size + 1;
            r->data[size] = carry; // append the carry limb
        } else {
            r->size = a->size;
        }
        r->sign = false;
        return CRYPT_SUCCESS;
    }
    // a is negative: the result is w - |a|; its sign depends on magnitudes.
    ret = BnExtend(r, a->size);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (a->size == 1) {
        // Single-limb negative operand: resolve the sign with word compares.
        if (a->data[0] > w) {
            r->sign = true;  // |a| > w, result stays negative
            r->data[0] = a->data[0] - w;
            r->size = 1;
        } else if (a->data[0] == w) {
            r->sign = false; // exact cancellation, result is zero
            r->data[0] = 0;
            r->size = 0;
        } else {
            r->sign = false; // w dominates, result is positive
            r->data[0] = w - a->data[0];
            r->size = 1;
        }
        return CRYPT_SUCCESS;
    }
    // |a| has more than one limb, so |a| > w: result is -(|a| - w).
    r->sign = true;
    UDec(r, a, w);
    return CRYPT_SUCCESS;
}
/*
 * Signed subtraction: r = a - b.
 * Opposite signs become a magnitude addition carrying a's sign; equal
 * signs subtract the smaller magnitude from the larger, flipping the sign
 * when |b| dominates. Identical operands yield 0.
 */
int32_t BN_Sub(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b)
{
    if (r == NULL || a == NULL || b == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (a->sign != b->sign) {
        // (+a) - (-b) == |a| + |b|; (-a) - (+b) == -(|a| + |b|).
        r->sign = a->sign;
        return UAdd(r, a, b);
    }
    int32_t cmp = BinCmp(a->data, a->size, b->data, b->size);
    if (cmp == 0) {
        // Same sign and same magnitude: difference is exactly zero.
        return BN_Zeroize(r);
    }
    if (cmp > 0) {
        r->sign = a->sign;
        return USub(r, a, b);
    }
    // |b| > |a|: magnitude is |b| - |a| and b's sign is inverted.
    r->sign = !b->sign;
    return USub(r, b, a);
}
/*
 * r = a - w, where w is a single non-negative machine word.
 *
 * Fix: in the single-limb positive path where a->data[0] >= w, r->sign was
 * never assigned, so a stale negative sign on a reused result object could
 * mark a non-negative difference as negative. The sign is now cleared
 * explicitly, matching every other branch of this function.
 */
int32_t BN_SubLimb(BN_BigNum *r, const BN_BigNum *a, BN_UINT w)
{
    if (r == NULL || a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    int32_t ret;
    if (a->size == 0) {
        // 0 - w == -w: negative unless w itself is zero.
        if (BN_SetLimb(r, w) != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
            return CRYPT_MEM_ALLOC_FAIL;
        }
        r->sign = (w == 0) ? false : true;
        return CRYPT_SUCCESS;
    }
    if (a->sign == true) {
        // Negative a: magnitude grows by w, result remains negative.
        ret = BnExtend(r, a->size + 1);
        if (ret != CRYPT_SUCCESS) {
            return ret;
        }
        BN_UINT carry = BinInc(r->data, a->data, a->size, w);
        if (carry != 0) {
            uint32_t size = a->size;
            r->data[size] = carry; // append the carry limb
            r->size = size + 1;
        } else {
            r->size = a->size;
        }
        r->sign = true;
        return CRYPT_SUCCESS;
    }
    ret = BnExtend(r, a->size);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (a->size == 1) {
        if (a->data[0] >= w) {
            r->sign = false; // fix: clear any stale sign; a - w >= 0 here
            r->data[0] = a->data[0] - w;
            r->size = BinFixSize(r->data, 1);
        } else {
            r->sign = true;  // w > a: result is -(w - a)
            r->data[0] = w - a->data[0];
            r->size = 1;
        }
        return CRYPT_SUCCESS;
    }
    // |a| has multiple limbs, so a > w: plain magnitude decrement.
    r->sign = false;
    UDec(r, a, w);
    return CRYPT_SUCCESS;
}
#ifdef HITLS_CRYPTO_BN_COMBA
// Equal-size multiplication via the divide-and-conquer (Comba build) kernel.
// Small even operand sizes run without scratch space; every other size
// borrows a scratch BN of SpaceSize(a->size) limbs from the optimizer.
// Precondition (enforced by the caller): a->size == b->size.
static int32_t BnMulConquer(BN_BigNum *t, const BN_BigNum *a, const BN_BigNum *b, BN_Optimizer *opt)
{
    if (a->size <= SMALL_CONQUER_SIZE && a->size % 2 == 0) { // 2 is to check if a->size is even
        MulConquer(t->data, a->data, b->data, a->size, NULL, false);
    } else {
        BN_BigNum *tmpBn = OptimizerGetBn(opt, SpaceSize(a->size));
        if (tmpBn == NULL) {
            BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
            return CRYPT_BN_OPTIMIZER_GET_FAIL;
        }
        MulConquer(t->data, a->data, b->data, a->size, tmpBn->data, false);
    }
    t->size = a->size + b->size; // full product width; trimmed by the caller
    return CRYPT_SUCCESS;
}
#endif
/*
 * Signed multiplication: r = a * b.
 * Scratch space comes from the optimizer; when r aliases a or b, the
 * product is formed in a temporary BN first and copied back.
 */
int32_t BN_Mul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, BN_Optimizer *opt)
{
    if (r == NULL || a == NULL || b == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (a->size == 0 || b->size == 0) {
        return BN_Zeroize(r); // multiplication by zero
    }
    uint32_t size = a->size + b->size; // maximum limb count of the product
    int32_t ret = BnExtend(r, size);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = OptimizerStart(opt); // using the Optimizer
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_BigNum *t = NULL;
    if (r == a || r == b) {
        // r aliases an input: multiply into a temporary to avoid clobbering.
        t = OptimizerGetBn(opt, r->room); // apply for a BN object
        if (t == NULL) {
            OptimizerEnd(opt); // release occupation from the optimizer
            BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
            return CRYPT_BN_OPTIMIZER_GET_FAIL;
        }
    } else {
        t = r;
    }
    t->sign = a->sign != b->sign; // product is negative iff signs differ
#ifdef HITLS_CRYPTO_BN_COMBA
    if (a->size == b->size) {
        // Equal operand sizes take the divide-and-conquer path.
        ret = BnMulConquer(t, a, b, opt);
        if (ret != CRYPT_SUCCESS) {
            OptimizerEnd(opt);
            return ret;
        }
    } else {
#endif
        t->size = BinMul(t->data, t->room, a->data, a->size, b->data, b->size);
#ifdef HITLS_CRYPTO_BN_COMBA
    }
#endif
    if (r != t) {
        ret = BN_Copy(r, t);
        if (ret != CRYPT_SUCCESS) {
            OptimizerEnd(opt); // release occupation from the optimizer
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
    }
    r->size = BinFixSize(r->data, size); // strip leading zero limbs
    OptimizerEnd(opt);
    return CRYPT_SUCCESS;
}
/*
 * r = a * w for a single machine word w; the sign of a is preserved.
 * The result may need one limb more than a to hold the final carry.
 */
int32_t BN_MulLimb(BN_BigNum *r, const BN_BigNum *a, const BN_UINT w)
{
    if (r == NULL || a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (BN_Bits(a) == 0 || w == 0) {
        return BN_Zeroize(r); // either factor zero yields zero
    }
    int32_t ret = BnExtend(r, a->size + 1);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // Schoolbook word-by-word multiply, propagating the carry upward.
    BN_UINT carry = 0;
    uint32_t loc;
    for (loc = 0; loc < a->size; loc++) {
        BN_UINT rh;
        BN_UINT rl;
        MUL_AB(rh, rl, a->data[loc], w);        // (rh:rl) = a->data[loc] * w
        ADD_AB(carry, r->data[loc], rl, carry); // add the incoming carry
        carry += rh;
    }
    if (carry != 0) {
        r->data[loc++] = carry; // Input parameter checking ensures that no out-of-bounds
    }
    r->sign = a->sign;
    r->size = loc;
    return CRYPT_SUCCESS;
}
/*
 * r = a^2. The result is always non-negative and occupies at most
 * 2 * a->size limbs.
 */
int32_t BN_Sqr(BN_BigNum *r, const BN_BigNum *a, BN_Optimizer *opt)
{
    if (r == NULL || a == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (a->size == 0) {
        return BN_Zeroize(r); // 0^2 == 0
    }
    int32_t ret = BnExtend(r, a->size * 2); // The maximum bit required for mul is 2x that of a.
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = OptimizerStart(opt); // using the Optimizer
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
#ifdef HITLS_CRYPTO_BN_COMBA
    // Divide-and-conquer squaring; small even sizes need no scratch buffer.
    if (a->size <= SMALL_CONQUER_SIZE && a->size % 2 == 0) { // 2 is to check if a->size is even.
        SqrConquer(r->data, a->data, a->size, NULL, false);
    } else {
        BN_BigNum *tmpBn = OptimizerGetBn(opt, SpaceSize(a->size));
        if (tmpBn == NULL) {
            OptimizerEnd(opt);
            BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
            return CRYPT_BN_OPTIMIZER_GET_FAIL;
        }
        SqrConquer(r->data, a->data, a->size, tmpBn->data, false);
    }
#else
    BinSqr(r->data, a->size << 1, a->data, a->size);
#endif
    r->size = BinFixSize(r->data, a->size * 2); // The r->data size is a->size * 2.
    r->sign = false; // The square must be positive.
    OptimizerEnd(opt);
    return CRYPT_SUCCESS;
}
/*
 * Validate the operands of BN_Div: x, y, and opt must be non-NULL,
 * the q and r outputs must be distinct objects, and the divisor y must
 * not be zero (the zero-divisor error is returned without pushing it
 * onto the error stack, matching the original contract).
 */
int32_t DivInputCheck(const BN_BigNum *q, const BN_BigNum *r, const BN_BigNum *x,
    const BN_BigNum *y, const BN_Optimizer *opt)
{
    if (x == NULL || y == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (q == r) {
        // Quotient and remainder must not share the same destination.
        BSL_ERR_PUSH_ERROR(CRYPT_INVALID_ARG);
        return CRYPT_INVALID_ARG;
    }
    // The divisor cannot be 0.
    return (y->size != 0) ? CRYPT_SUCCESS : CRYPT_BN_ERR_DIVISOR_ZERO;
}
/*
 * Fast path for BN_Div when |x| <= |y|.
 *   flag < 0  (|x| <  |y|): quotient is 0, remainder is x itself.
 *   flag == 0 (|x| == |y|): quotient is +/-1, remainder is 0.
 * A NULL q or r simply skips that output.
 */
int32_t DivSimple(BN_BigNum *q, BN_BigNum *r, const BN_BigNum *x, const BN_BigNum *y, int32_t flag)
{
    int32_t ret;
    if (flag < 0) {
        // |x| < |y|: x / y == 0 with remainder x.
        if (r != NULL) {
            ret = BN_Copy(r, x);
            if (ret != CRYPT_SUCCESS) {
                BSL_ERR_PUSH_ERROR(ret);
                return ret;
            }
        }
        return (q != NULL) ? BN_Zeroize(q) : CRYPT_SUCCESS;
    }
    // |x| == |y|: x / y == +/-1 with remainder 0.
    if (q != NULL) {
        bool negative = (x->sign != y->sign);
        ret = BN_SetLimb(q, 1);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        q->sign = negative; // restore the sign clobbered by BN_SetLimb
    }
    return (r != NULL) ? BN_Zeroize(r) : CRYPT_SUCCESS;
}
/*
 * Euclidean division: q = x / y with remainder r (either output may be
 * NULL, never both aliasing the same BN). The inputs are preserved:
 * BinDiv consumes its dividend in place, so x and y are first copied
 * into optimizer-owned temporaries.
 */
int32_t BN_Div(BN_BigNum *q, BN_BigNum *r, const BN_BigNum *x, const BN_BigNum *y, BN_Optimizer *opt)
{
    int32_t ret = DivInputCheck(q, r, x, y, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = BinCmp(x->data, x->size, y->data, y->size);
    if (ret <= 0) { // simple processing when dividend <= divisor
        return DivSimple(q, r, x, y, ret);
    }
    ret = OptimizerStart(opt); // using the Optimizer
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    /* Apply for temporary space for the q and r of the BN. */
    BN_BigNum *qTmp = OptimizerGetBn(opt, x->size + 2); // BinDiv:x->room >= xSize + 2
    BN_BigNum *rTmp = OptimizerGetBn(opt, x->size + 2); // BinDiv:x->room >= xSize + 2
    BN_BigNum *yTmp = OptimizerGetBn(opt, y->size);
    if (qTmp == NULL || rTmp == NULL || yTmp == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        ret = CRYPT_BN_OPTIMIZER_GET_FAIL;
        goto err;
    }
    (void)memcpy_s(yTmp->data, y->size * sizeof(BN_UINT), y->data, y->size * sizeof(BN_UINT));
    (void)memcpy_s(rTmp->data, x->size * sizeof(BN_UINT), x->data, x->size * sizeof(BN_UINT));
    rTmp->sign = x->sign;
    // BinDiv divides rTmp (the copy of x) in place; it returns the remainder
    // size and writes the quotient size through &qTmp->size.
    rTmp->size = BinDiv(qTmp->data, &(qTmp->size), rTmp->data, x->size, yTmp->data, y->size);
    if (q != NULL) {
        ret = BnExtend(q, qTmp->size);
        if (ret != CRYPT_SUCCESS) {
            goto err;
        }
        q->sign = (x->sign != y->sign); // quotient is negative iff signs differ
        (void)memcpy_s(q->data, qTmp->size * sizeof(BN_UINT), qTmp->data, qTmp->size * sizeof(BN_UINT));
        q->size = qTmp->size;
    }
    if (r != NULL) {
        ret = BnExtend(r, rTmp->size);
        if (ret != CRYPT_SUCCESS) {
            goto err;
        }
        r->sign = (rTmp->size == 0) ? false : rTmp->sign; // The symbol can only be positive when the value is 0.
        (void)memcpy_s(r->data, rTmp->size * sizeof(BN_UINT), rTmp->data, rTmp->size * sizeof(BN_UINT));
        r->size = rTmp->size;
    }
err:
    OptimizerEnd(opt); // release occupation from the optimizer
    return ret;
}
/*
 * Validate the operands of BN_DivLimb: x must exist, at least one of the
 * q/r outputs must be requested, and the word divisor must be non-zero.
 */
int32_t DivLimbInputCheck(const BN_BigNum *q, const BN_UINT *r, const BN_BigNum *x, const BN_UINT y)
{
    if (x == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (q == NULL && r == NULL) { // at least one output is required
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (y == 0) { // division by zero is rejected up front
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_DIVISOR_ZERO);
        return CRYPT_BN_ERR_DIVISOR_ZERO;
    }
    return CRYPT_SUCCESS;
}
/*
 * Word division: q = x / y and *r = x % y for a single-word divisor.
 * Works on a heap copy of x; the divisor is normalized so its most
 * significant bit is set, as DIV_ND requires, and the remainder is
 * de-normalized afterwards.
 */
int32_t BN_DivLimb(BN_BigNum *q, BN_UINT *r, const BN_BigNum *x, const BN_UINT y)
{
    int32_t ret = DivLimbInputCheck(q, r, x, y);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // Apply for a copy of object x.
    BN_BigNum *xTmp = BN_Dup(x);
    if (xTmp == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    BN_UINT rem = 0;
    BN_UINT yTmp = y;
    uint32_t shifts;
    if (x->size == 0) {
        goto end; // 0 / y: quotient 0, remainder 0
    }
    shifts = GetZeroBitsUint(yTmp);
    if (shifts != 0) {
        yTmp <<= shifts; // Ensure that the most significant bit of the divisor is 1.
        ret = BN_Lshift(xTmp, xTmp, shifts); // scale the dividend by the same amount
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            BN_Destroy(xTmp);
            return ret;
        }
    }
    // Schoolbook long division, one word at a time from the top limb down;
    // the quotient overwrites xTmp in place.
    for (int32_t i = (int32_t)(xTmp->size - 1); i >= 0; i--) {
        BN_UINT quo;
        DIV_ND(quo, rem, rem, xTmp->data[i], yTmp);
        xTmp->data[i] = quo;
    }
    xTmp->size = BinFixSize(xTmp->data, xTmp->size);
    if (xTmp->size == 0) {
        xTmp->sign = 0; // a zero quotient is canonically non-negative
    }
    rem >>= shifts; // undo the normalization scaling on the remainder
end:
    if (q != NULL) {
        ret = BN_Copy(q, xTmp);
        if (ret != CRYPT_SUCCESS) {
            BN_Destroy(xTmp);
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
    }
    if (r != NULL) {
        *r = rem;
    }
    BN_Destroy(xTmp);
    return ret;
}
/*
 * Non-negative modulo: r = a mod m with the result always in [0, |m|).
 * Built on BN_Div; a negative remainder is lifted back into range by
 * adding the modulus (or subtracting it when m itself is negative).
 */
int32_t BN_Mod(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *m, BN_Optimizer *opt)
{
    // check input parameters
    if (r == NULL || a == NULL || m == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (m->size == 0) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_DIVISOR_ZERO);
        return CRYPT_BN_ERR_DIVISOR_ZERO;
    }
    int32_t ret = BnExtend(r, m->size);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // Scratch BN receives the (possibly negative) raw remainder.
    BN_BigNum *t = OptimizerGetBn(opt, m->size);
    if (t == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    ret = BN_Div(NULL, t, a, m, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        OptimizerEnd(opt);
        return ret;
    }
    // t is a positive number
    if (t->sign == false) {
        ret = BN_Copy(r, t);
        OptimizerEnd(opt);
        return ret;
    }
    // When t is a negative number, the modulo operation result must be positive.
    if (m->sign == true) { // m is a negative number
        ret = BN_Sub(r, t, m);
    } else { // m is a positive number
        ret = BN_Add(r, t, m);
    }
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
    }
    OptimizerEnd(opt);
    return ret;
}
/*
 * Word modulo: *r = a mod m with the result in [0, m).
 * The magnitude remainder comes from BN_DivLimb; for negative a it is
 * reflected to m - rem.
 *
 * Fix: when a is negative and |a| mod m == 0, the previous code produced
 * *r == m, which is not a valid residue (BN_Mod never returns the modulus
 * either). A zero remainder now stays zero.
 */
int32_t BN_ModLimb(BN_UINT *r, const BN_BigNum *a, const BN_UINT m)
{
    if (r == NULL || a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (m == 0) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_DIVISOR_ZERO);
        return CRYPT_BN_ERR_DIVISOR_ZERO;
    }
    if (a->size == 0) {
        *r = 0; // 0 mod m == 0
        return CRYPT_SUCCESS;
    }
    int32_t ret = BN_DivLimb(NULL, r, a, m);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // Lift |a| mod m into [0, m) for negative a; guard the zero remainder
    // so the result never equals the modulus itself.
    if (a->sign && *r != 0) {
        *r = m - *r;
    }
    return ret;
}
/*
 * Shared validation for modular add/sub/mul: every operand must be
 * present, the result buffer is grown to mod->size limbs, and the
 * modulus must be non-zero. Note that BnExtend runs before the
 * zero-modulus check, so r is always large enough on CRYPT_SUCCESS.
 */
int32_t ModBaseInputCheck(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
    const BN_BigNum *mod, const BN_Optimizer *opt)
{
    bool anyNull = (r == NULL) || (a == NULL) || (b == NULL) || (mod == NULL) || (opt == NULL);
    if (anyNull) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    int32_t ret = BnExtend(r, mod->size);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (BN_IsZero(mod)) { // reduction modulo zero is division by zero
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_DIVISOR_ZERO);
        return CRYPT_BN_ERR_DIVISOR_ZERO;
    }
    return CRYPT_SUCCESS;
}
/*
 * Modular subtraction: r = (a - b) mod m.
 * Forms the signed difference in an optimizer-owned scratch BN, then
 * reduces it into [0, |m|) via BN_Mod.
 */
int32_t BN_ModSub(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, const BN_BigNum *mod, BN_Optimizer *opt)
{
    int32_t ret = ModBaseInputCheck(r, a, b, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = OptimizerStart(opt); // reserve a stack frame in the optimizer
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // The difference needs at most max(a->size, b->size) limbs.
    uint32_t diffRoom = (a->size > b->size) ? a->size : b->size;
    BN_BigNum *diff = OptimizerGetBn(opt, diffRoom);
    if (diff == NULL) {
        ret = CRYPT_BN_OPTIMIZER_GET_FAIL;
        BSL_ERR_PUSH_ERROR(ret);
        goto exit;
    }
    ret = BN_Sub(diff, a, b);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto exit;
    }
    ret = BN_Mod(r, diff, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
    }
exit:
    OptimizerEnd(opt); // release the optimizer frame
    return ret;
}
/*
 * Modular addition: r = (a + b) mod m.
 * Forms the signed sum in an optimizer-owned scratch BN, then reduces
 * it into [0, |m|) via BN_Mod.
 */
int32_t BN_ModAdd(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, const BN_BigNum *mod, BN_Optimizer *opt)
{
    int32_t ret = ModBaseInputCheck(r, a, b, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = OptimizerStart(opt); // reserve a stack frame in the optimizer
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // Scratch sized like the larger operand, matching BN_ModSub.
    uint32_t sumRoom = (a->size > b->size) ? a->size : b->size;
    BN_BigNum *sum = OptimizerGetBn(opt, sumRoom);
    if (sum == NULL) {
        ret = CRYPT_BN_OPTIMIZER_GET_FAIL;
        BSL_ERR_PUSH_ERROR(ret);
        goto exit;
    }
    ret = BN_Add(sum, a, b);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto exit;
    }
    ret = BN_Mod(r, sum, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
    }
exit:
    OptimizerEnd(opt); // release the optimizer frame
    return ret;
}
/*
 * Modular multiplication: r = (a * b) mod m.
 * The full product is formed in a scratch BN of a->size + b->size + 1
 * limbs, then reduced into [0, |m|) by BN_Mod.
 */
int32_t BN_ModMul(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b, const BN_BigNum *mod, BN_Optimizer *opt)
{
    int32_t ret;
    ret = ModBaseInputCheck(r, a, b, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = OptimizerStart(opt); // using the Optimizer
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    /* Apply for the temporary space of the BN object. */
    BN_BigNum *t = OptimizerGetBn(opt, a->size + b->size + 1);
    if (t == NULL) {
        ret = CRYPT_BN_OPTIMIZER_GET_FAIL;
        BSL_ERR_PUSH_ERROR(ret);
        goto err;
    }
    ret = BN_Mul(t, a, b, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto err;
    }
    ret = BN_Mod(r, t, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
    }
err:
    OptimizerEnd(opt); // release occupation from the optimizer
    return ret;
}
/*
 * Modular squaring: r = a^2 mod m.
 * The full square is formed in a scratch BN of 2 * a->size + 1 limbs,
 * then reduced into [0, |m|) by BN_Mod.
 */
int32_t BN_ModSqr(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *mod, BN_Optimizer *opt)
{
    bool invalidInput = (r == NULL || a == NULL || mod == NULL || opt == NULL);
    if (invalidInput) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    // mod cannot be 0
    if (BN_IsZero(mod)) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_DIVISOR_ZERO);
        return CRYPT_BN_ERR_DIVISOR_ZERO;
    }
    int32_t ret = BnExtend(r, mod->size);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = OptimizerStart(opt); // using the Optimizer
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    /* Apply for the temporary space of the BN object. */
    BN_BigNum *t = OptimizerGetBn(opt, (a->size << 1) + 1);
    if (t == NULL) {
        ret = CRYPT_BN_OPTIMIZER_GET_FAIL;
        BSL_ERR_PUSH_ERROR(ret);
        goto err;
    }
    ret = BN_Sqr(t, a, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto err;
    }
    ret = BN_Mod(r, t, mod, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
    }
err:
    OptimizerEnd(opt); // release occupation from the optimizer
    return ret;
}
/*
 * Validate the operands of BN_ModExp: all pointers present, modulus
 * non-zero, exponent non-negative; finally grow r to m->size limbs.
 */
int32_t ModExpInputCheck(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e,
    const BN_BigNum *m, const BN_Optimizer *opt)
{
    if (r == NULL || a == NULL || e == NULL || m == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (BN_IsZero(m)) { // a zero modulus means division by zero
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_DIVISOR_ZERO);
        return CRYPT_BN_ERR_DIVISOR_ZERO;
    }
    if (e->sign) { // negative exponents are not supported
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_EXP_NO_NEGATIVE);
        return CRYPT_BN_ERR_EXP_NO_NEGATIVE;
    }
    // Ensure the result object can hold a value reduced modulo m.
    return BnExtend(r, m->size);
}
/*
 * Square-and-multiply core of BN_ModExp; the result accumulates in x.
 * On entry y holds a mod m; bit 0 of e decides whether x starts as y or 1.
 * For each higher bit i, y is squared and multiplied into x when the bit
 * is set, e.g. x^0b1101 = x^1 * x^0b100 * x^0b1000.
 */
int32_t ModExpCore(BN_BigNum *x, BN_BigNum *y, const BN_BigNum *e, const BN_BigNum *m, BN_Optimizer *opt)
{
    int32_t ret;
    if (BN_GetBit(e, 0) == 1) {
        (void)BN_Copy(x, y); // ignores the returned value, we can ensure that no error occurs when applying memory
    } else { // set the value to 1
        (void)BN_SetLimb(x, 1); // ignores the returned value, we can ensure that no error occurs when applying memory
    }
    uint32_t bits = BN_Bits(e);
    for (uint32_t i = 1; i < bits; i++) {
        ret = BN_ModSqr(y, y, m, opt); // y is a temporary variable, which is multiplied by x
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        if (BN_GetBit(e, i) == 1) {
            ret = BN_ModMul(x, x, y, m, opt); // x^1101 = x^1 * x^100 * x^1000
            if (ret != CRYPT_SUCCESS) {
                BSL_ERR_PUSH_ERROR(ret);
                return ret;
            }
        }
    }
    return CRYPT_SUCCESS;
}
// Odd-modulus path of BN_ModExp: build a Montgomery context for m, run the
// Montgomery exponentiation, and tear the context down again.
static int32_t SwitchMont(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e, const BN_BigNum *m, BN_Optimizer *opt)
{
    int32_t ret = CRYPT_NULL_INPUT;
    BN_Mont *mont = BN_MontCreate(m);
    if (mont != NULL) {
        ret = BN_MontExp(r, a, e, mont, opt);
        BN_MontDestroy(mont);
    } else {
        // Context creation failed (reported as a NULL-input error).
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
    }
    return ret;
}
/*
 * Modular exponentiation: r = a^e mod m (e must be non-negative).
 * Odd positive moduli are dispatched to the Montgomery implementation;
 * every other modulus uses plain square-and-multiply via ModExpCore.
 */
int32_t BN_ModExp(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *e, const BN_BigNum *m, BN_Optimizer *opt)
{
    int32_t ret = ModExpInputCheck(r, a, e, m, opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // When m = 1 or -1: everything reduces to 0.
    if (m->size == 1 && m->data[0] == 1) {
        return BN_Zeroize(r);
    }
    if (BN_IsOdd(m) && !BN_IsNegative(m)) {
        return SwitchMont(r, a, e, m, opt);
    }
    ret = OptimizerStart(opt); // using the Optimizer
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    /* Apply for the temporary space of the BN object. */
    BN_BigNum *x = OptimizerGetBn(opt, m->size);
    BN_BigNum *y = OptimizerGetBn(opt, m->size);
    if (x == NULL || y == NULL) {
        OptimizerEnd(opt); // release occupation from the optimizer
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    // step 1: Obtain the modulus once, and then determine the power and remainder.
    ret = BN_Mod(y, a, m, opt);
    if (ret != CRYPT_SUCCESS) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // step 2: e == 0 (any base to the power 0 is 1; this treats 0^0 as 1)
    // or a mod m == 1 (1 to any power is 1) short-circuits to 1.
    if (BN_IsZero(e) || BN_IsOne(y)) {
        OptimizerEnd(opt);
        return BN_SetLimb(r, 1);
    }
    // step3: The remainder is 0 and the result must be 0.
    if (BN_IsZero(y)) {
        OptimizerEnd(opt); // release occupation from the optimizer
        return BN_Zeroize(r);
    }
    /* Power factorization: e binary x^1101 = x^1 * x^100 * x^1000
       e Decimal x^13 = x^1 * x^4 * x^8 */
    ret = ModExpCore(x, y, e, m, opt);
    if (ret != CRYPT_SUCCESS) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    ret = BN_Copy(r, x);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
    }
    OptimizerEnd(opt); // release occupation from the optimizer
    return ret;
}
/*
 * Right shift: r = a >> n, with a's sign preserved.
 * Shifting out every significant bit yields zero; any stale high limbs
 * left in r beyond the new size are cleared.
 */
int32_t BN_Rshift(BN_BigNum *r, const BN_BigNum *a, uint32_t n)
{
    if (r == NULL || a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (BN_Bits(a) <= n) {
        return BN_Zeroize(r); // the whole value is shifted out
    }
    int32_t ret = BnExtend(r, BITS_TO_BN_UNIT(BN_Bits(a) - n));
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    r->sign = a->sign;
    uint32_t size = BinRshift(r->data, a->data, a->size, n);
    if (size < r->size) {
        // Wipe limbs above the new top that may still hold old data.
        if (memset_s(r->data + size, (r->room - size) * sizeof(BN_UINT), 0,
            (r->size - size) * sizeof(BN_UINT)) != EOK) {
            BSL_ERR_PUSH_ERROR(CRYPT_SECUREC_FAIL);
            return CRYPT_SECUREC_FAIL;
        }
    }
    r->size = size;
    return CRYPT_SUCCESS;
}
/*
 * Left shift: r = a << n, with a's sign preserved.
 * r is grown by the number of whole limbs the shift can add before the
 * limb-level shift runs; shifting zero yields zero.
 */
int32_t BN_Lshift(BN_BigNum *r, const BN_BigNum *a, uint32_t n)
{
    if (r == NULL || a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    // Round the bit count up to whole limbs of extra room.
    uint32_t incUnit = n % BN_UINT_BITS == 0 ? (n / BN_UINT_BITS) : ((n / BN_UINT_BITS) + 1);
    int32_t ret = BnExtend(r, a->size + incUnit);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (a->size != 0) {
        r->size = BinLshift(r->data, a->data, a->size, n);
    } else {
        (void)BN_Zeroize(r); // 0 << n == 0
    }
    r->sign = a->sign;
    return CRYPT_SUCCESS;
}
#ifdef HITLS_CRYPTO_ECC
/*
 * Branch-light select: r = (mask == 0) ? a : b.
 * 'mask' must be either 0 or all-ones; '~mask' selects a, 'mask' selects b.
 * All three BNs must share the same room.
 *
 * Fix: the sign previously followed the OPPOSITE operand of the one whose
 * data and size were selected (mask != 0 picked b's data/size but a's sign).
 * The sign now tracks the selected operand.
 */
int32_t BN_CopyWithMask(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b,
    BN_UINT mask)
{
    if (r == NULL || a == NULL || b == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if ((a->room != r->room) || (b->room != r->room)) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_MASKCOPY_LEN);
        return CRYPT_BN_ERR_MASKCOPY_LEN;
    }
    BN_UINT rmask = ~mask;
    uint32_t len = r->room;
    BN_UINT *dst = r->data;
    BN_UINT *srcA = a->data;
    BN_UINT *srcB = b->data;
    for (uint32_t i = 0; i < len; i++) {
        dst[i] = (srcA[i] & rmask) ^ (srcB[i] & mask); // word-wise select
    }
    // Sign and size must come from the same operand as the data:
    // mask != 0 selects b, mask == 0 selects a.
    r->sign = (mask != 0) ? (b->sign) : (a->sign);
    r->size = (a->size & (uint32_t)rmask) ^ (b->size & (uint32_t)mask);
    return CRYPT_SUCCESS;
}
#endif
#if defined(HITLS_CRYPTO_ECC) && defined(HITLS_CRYPTO_CURVE_MONT)
/* Invoked by the ECC module and the sign can be ignored.
* if mask = BN_MASK, a, b --> b, a
* if mask = 0, a, b --> a, b
*/
/*
 * Conditionally swap a and b in place based on 'mask' (0 keeps them,
 * all-ones swaps them), including their sizes; signs are intentionally
 * ignored (the ECC caller does not depend on them — see note above).
 * Both operands must share the same room.
 */
int32_t BN_SwapWithMask(BN_BigNum *a, BN_BigNum *b, BN_UINT mask)
{
    if (a == NULL || b == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (a->room != b->room) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_SWAP_LEN);
        return CRYPT_BN_ERR_SWAP_LEN;
    }
    BN_UINT rmask = ~mask;
    BN_UINT *srcA = a->data;
    BN_UINT *srcB = b->data;
    BN_UINT tmp1;
    BN_UINT tmp2;
    // Word-wise masked exchange over the full allocated room.
    for (uint32_t i = 0; i < a->room; i++) {
        tmp1 = srcA[i];
        tmp2 = srcB[i];
        srcA[i] = (tmp1 & rmask) | (tmp2 & mask);
        srcB[i] = (tmp2 & rmask) | (tmp1 & mask);
    }
    // Exchange the sizes with the same mask so both stay consistent.
    tmp1 = a->size;
    tmp2 = b->size;
    a->size = (tmp1 & (uint32_t)rmask) | (tmp2 & (uint32_t)mask);
    b->size = (tmp2 & (uint32_t)rmask) | (tmp1 & (uint32_t)mask);
    return CRYPT_SUCCESS;
}
#endif // HITLS_CRYPTO_ECC and HITLS_CRYPTO_CURVE_MONT
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_operation.c | C | unknown | 29,125 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include <string.h>
#include "securec.h"
#include "bsl_err_internal.h"
#include "bsl_sal.h"
#include "crypt_errno.h"
#include "bn_optimizer.h"
BN_Optimizer *BN_OptimizerCreate(void)
{
BN_Optimizer *opt = BSL_SAL_Calloc(1u, sizeof(BN_Optimizer));
if (opt == NULL) {
return NULL;
}
opt->curChunk = BSL_SAL_Calloc(1u, sizeof(Chunk));
if (opt->curChunk == NULL) {
BSL_SAL_FREE(opt);
return NULL;
}
return opt;
}
// Attach a library context to the optimizer.
// NOTE(review): no NULL check on 'opt' — callers must pass a valid object.
void BN_OptimizerSetLibCtx(void *libCtx, BN_Optimizer *opt)
{
    opt->libCtx = libCtx;
}
// Return the library context previously attached with BN_OptimizerSetLibCtx.
// NOTE(review): no NULL check on 'opt' — callers must pass a valid object.
void *BN_OptimizerGetLibCtx(BN_Optimizer *opt)
{
    return opt->libCtx;
}
void BN_OptimizerDestroy(BN_Optimizer *opt)
{
if (opt == NULL) {
return;
}
Chunk *curChunk = opt->curChunk;
Chunk *nextChunk = curChunk->next;
Chunk *prevChunk = curChunk->prev;
while (nextChunk != NULL) {
for (uint32_t i = 0; i < HITLS_CRYPT_OPTIMIZER_BN_NUM; i++) {
BSL_SAL_CleanseData((void *)(nextChunk->bigNums[i].data), nextChunk->bigNums[i].size * sizeof(BN_UINT));
BSL_SAL_FREE(nextChunk->bigNums[i].data);
}
Chunk *tmp = nextChunk->next;
BSL_SAL_Free(nextChunk);
nextChunk = tmp;
}
while (prevChunk != NULL) {
for (uint32_t i = 0; i < HITLS_CRYPT_OPTIMIZER_BN_NUM; i++) {
BSL_SAL_CleanseData((void *)(prevChunk->bigNums[i].data), prevChunk->bigNums[i].size * sizeof(BN_UINT));
BSL_SAL_FREE(prevChunk->bigNums[i].data);
}
Chunk *tmp = prevChunk->prev;
BSL_SAL_Free(prevChunk);
prevChunk = tmp;
}
// curChunk != NULL
for (uint32_t i = 0; i < HITLS_CRYPT_OPTIMIZER_BN_NUM; i++) {
BSL_SAL_CleanseData((void *)(curChunk->bigNums[i].data), curChunk->bigNums[i].size * sizeof(BN_UINT));
BSL_SAL_FREE(curChunk->bigNums[i].data);
}
BSL_SAL_Free(curChunk);
BSL_SAL_Free(opt);
}
/*
 * Push one frame onto the optimizer's usage stack.
 * Fails with CRYPT_BN_OPTIMIZER_STACK_FULL once the fixed depth
 * (CRYPT_OPTIMIZER_MAXDEEP) has been reached.
 */
int32_t OptimizerStart(BN_Optimizer *opt)
{
    if (opt->deep == CRYPT_OPTIMIZER_MAXDEEP) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_STACK_FULL);
        return CRYPT_BN_OPTIMIZER_STACK_FULL;
    }
    opt->deep++;
    return CRYPT_SUCCESS;
}
/* Hand out the next preset BN slot, extending the chunk list on demand.
 * New chunks are inserted in front of the current one, so 'prev' points
 * toward spare (empty) chunks and 'next' toward fully occupied ones.
 * The returned slot has NOT been initialized yet (BnMake does that). */
static BN_BigNum *GetPresetBn(BN_Optimizer *opt, Chunk *curChunk)
{
    if (curChunk->occupied != HITLS_CRYPT_OPTIMIZER_BN_NUM) {
        curChunk->occupied++;
        return &curChunk->bigNums[curChunk->occupied - 1];
    }
    if (curChunk->prev != NULL) {
        // A previously allocated spare chunk exists: reuse it.
        opt->curChunk = curChunk->prev;
        opt->curChunk->occupied++; // spare chunks start with occupied == 0
        return &opt->curChunk->bigNums[opt->curChunk->occupied - 1];
    }
    // Every chunk is fully occupied: allocate a fresh one at the list head.
    Chunk *newChunk = BSL_SAL_Calloc(1u, sizeof(Chunk));
    if (newChunk == NULL) {
        return NULL;
    }
    newChunk->next = curChunk;
    curChunk->prev = newChunk;
    opt->curChunk = newChunk;
    newChunk->occupied++;
    return &newChunk->bigNums[newChunk->occupied - 1];
}
/* (Re)initialize a preset BN to zero with at least 'room' limbs of storage.
 * Grows the data buffer when needed (securely wiping the old one);
 * otherwise the existing buffer is simply cleared. Marks the BN as
 * optimizer-owned via CRYPT_BN_FLAG_OPTIMIZER. */
static int32_t BnMake(BN_BigNum *r, uint32_t room)
{
    if (r->room < room) {
        if (room > BITS_TO_BN_UNIT(BN_MAX_BITS)) {
            BSL_ERR_PUSH_ERROR(CRYPT_BN_BITS_TOO_MAX);
            return CRYPT_BN_BITS_TOO_MAX;
        }
        BN_UINT *tmp = (BN_UINT *)BSL_SAL_Calloc(1u, room * sizeof(BN_UINT));
        if (tmp == NULL) {
            BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
            return CRYPT_MEM_ALLOC_FAIL;
        }
        if (r->size > 0) {
            // Wipe any previous contents before releasing the old buffer.
            BSL_SAL_CleanseData(r->data, r->size * sizeof(BN_UINT));
        }
        BSL_SAL_FREE(r->data);
        r->data = tmp;
        r->room = room;
    } else {
        (void)memset_s(r->data, r->room * sizeof(BN_UINT), 0, r->room * sizeof(BN_UINT));
    }
    r->size = 0;
    r->sign = false;
    r->flag |= CRYPT_BN_FLAG_OPTIMIZER;
    return CRYPT_SUCCESS;
}
/* Borrow a zeroed BN with at least 'room' limbs from the optimizer.
 * Must be called inside an OptimizerStart/OptimizerEnd frame; the BN is
 * reclaimed automatically by the matching OptimizerEnd. */
BN_BigNum *OptimizerGetBn(BN_Optimizer *opt, uint32_t room)
{
    if (opt->deep == 0) {
        return NULL; // no active frame to charge the allocation to
    }
    if ((opt->used[opt->deep - 1] + 1) < opt->used[opt->deep - 1]) {
        // Avoid overflow
        return NULL;
    }
    BN_BigNum *tmp = GetPresetBn(opt, opt->curChunk);
    if (tmp == NULL) {
        return NULL;
    }
    if (BnMake(tmp, room) != CRYPT_SUCCESS) {
        return NULL;
    }
    opt->used[opt->deep - 1]++; // charge the BN to the current frame
    return tmp;
}
/*
 * Pop the top optimizer frame and return every BN it handed out.
 * Walks back through the chunk list — 'next' links point toward older,
 * fully occupied chunks — until the frame's allocation count is undone.
 */
void OptimizerEnd(BN_Optimizer *opt)
{
    if (opt->deep == 0) {
        return; // nothing to pop
    }
    opt->deep--;
    uint32_t usedNum = opt->used[opt->deep];
    opt->used[opt->deep] = 0;
    Chunk *curChunk = opt->curChunk;
    if (usedNum <= curChunk->occupied) {
        // The whole frame fits inside the current chunk.
        curChunk->occupied -= usedNum;
        return;
    }
    // Drain the current chunk, then walk toward older chunks.
    usedNum -= curChunk->occupied;
    curChunk->occupied = 0;
    while (usedNum >= HITLS_CRYPT_OPTIMIZER_BN_NUM) {
        curChunk = curChunk->next;
        curChunk->occupied = 0;
        usedNum -= HITLS_CRYPT_OPTIMIZER_BN_NUM;
    }
    if (usedNum != 0) {
        // Partially release the last affected chunk.
        curChunk = curChunk->next;
        curChunk->occupied = HITLS_CRYPT_OPTIMIZER_BN_NUM - usedNum;
    }
    opt->curChunk = curChunk;
    return;
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_optimizer.c | C | unknown | 5,708 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_OPTIMIZER_H
#define BN_OPTIMIZER_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "bn_basic.h"
#ifdef __cplusplus
extern "C" {    /* fix: the linkage specifier must be "C", not "c" */
#endif
#define CRYPT_OPTIMIZER_MAXDEEP 10
/*
* Peak memory usage of the bn process during RSA key generation. BN_NUM stands for HITLS_CRYPT_OPTIMIZER_BN_NUM.
* |----------------------------+--------+--------+--------+--------+--------|
* | key bits\memory(Kb)\BN_NUM | 16 | 24 | 32 | 48 | 64 |
* |----------------------------+--------+--------+--------+--------+--------|
* | rsa1024 | 9.0 | 9.7 | 9.7 | 10.8 | 12.0 |
* | rsa2048 | 20.4 | 21.0 | 21.1 | 22.6 | 22.6 |
* | rsa3072 | 37.8 | 38.3 | 38.5 | 40.0 | 40.0 |
* | rsa4096 | 73.5 | 73.5 | 74.2 | 75.7 | 75.7 |
* |----------------------------+--------+--------+--------+--------+--------|
*
* The number of chunk during RSA key generation. BN_NUM stands for HITLS_CRYPT_OPTIMIZER_BN_NUM.
* |----------------------------+--------+--------+--------+--------+--------|
* |key bits\chunk number\BN_NUM| 16 | 24 | 32 | 48 | 64 |
* |----------------------------+--------+--------+--------+--------+--------|
* | rsa1024 | 352 | 352 | 193 | 193 | 193 |
* | rsa2048 | 1325 | 1035 | 745 | 745 | 455 |
* | rsa3072 | 1597 | 1227 | 857 | 857 | 487 |
* | rsa4096 | 2522 | 1967 | 1412 | 1412 | 857 |
* |----------------------------+--------+--------+--------+--------+--------|
*/
#ifndef HITLS_CRYPT_OPTIMIZER_BN_NUM
#define HITLS_CRYPT_OPTIMIZER_BN_NUM 32
#endif
typedef struct ChunkStruct {
    uint32_t occupied; /** < number of bigNums currently handed out from this chunk */
    BN_BigNum bigNums[HITLS_CRYPT_OPTIMIZER_BN_NUM]; /** < preset BN_BigNums */
    struct ChunkStruct *prev; /** < previous (spare) chunk in the list */
    struct ChunkStruct *next; /** < next (older) chunk in the list */
} Chunk;
struct BnOptimizer {
    uint32_t deep; /* current depth of the frame stack */
    uint32_t used[CRYPT_OPTIMIZER_MAXDEEP]; /* number of BNs handed out per frame */
    Chunk *curChunk; /** < chunk currently being allocated from (head of the list) */
    void *libCtx; /* opaque library context set via BN_OptimizerSetLibCtx */
};
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_optimizer.h | C | unknown | 2,911 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN_PRIME
#include <stdint.h>
#include "securec.h"
#include "bsl_err_internal.h"
#include "bsl_sal.h"
#include "crypt_errno.h"
#include "bn_bincal.h"
#include "bn_optimizer.h"
/*
 * Differential table of adjacent prime numbers, size = 1024
 * Cumulative sums of the entries, starting from 2, enumerate the first 1024 primes
 * (table[0] = 0, so the first sum is 2 itself; table[1] = 1 yields 3, and so on).
 * The times of trial division will affect whether the number enters the Miller-rabin test.
 * We consider common prime lengths: 1024, 2048, 4096, 8192 bits.
 * 1024 bits: we choose 128 try times, ref the paper of 'A Performant, Misuse-Resistant API for Primality Testing'.
 * 2048 bits: 128 try times: 1 (performance baseline)
 *            384 try times: +0.15
 *            512 try times: +0.03
 *            1024 try times: +0.16
 * 4096 bits: 1024 try times: 0.04 tps
 *            2048 try times: 0.02 tps
 * 8192 bits: 1024 try times: 0.02 tps
 *            2048 try times: 0.02 tps
 */
static const uint8_t PRIME_DIFF_TABLE[1024] = {
    0, 1, 2, 2, 4, 2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6,
    6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, 2, 4, 14, 4,
    6, 2, 10, 2, 6, 6, 4, 6, 6, 2, 10, 2, 4, 2, 12, 12,
    4, 2, 4, 6, 2, 10, 6, 6, 6, 2, 6, 4, 2, 10, 14, 4,
    2, 4, 14, 6, 10, 2, 4, 6, 8, 6, 6, 4, 6, 8, 4, 8,
    10, 2, 10, 2, 6, 4, 6, 8, 4, 2, 4, 12, 8, 4, 8, 4,
    6, 12, 2, 18, 6, 10, 6, 6, 2, 6, 10, 6, 6, 2, 6, 6,
    4, 2, 12, 10, 2, 4, 6, 6, 2, 12, 4, 6, 8, 10, 8, 10,
    8, 6, 6, 4, 8, 6, 4, 8, 4, 14, 10, 12, 2, 10, 2, 4,
    2, 10, 14, 4, 2, 4, 14, 4, 2, 4, 20, 4, 8, 10, 8, 4,
    6, 6, 14, 4, 6, 6, 8, 6, 12, 4, 6, 2, 10, 2, 6, 10,
    2, 10, 2, 6, 18, 4, 2, 4, 6, 6, 8, 6, 6, 22, 2, 10,
    8, 10, 6, 6, 8, 12, 4, 6, 6, 2, 6, 12, 10, 18, 2, 4,
    6, 2, 6, 4, 2, 4, 12, 2, 6, 34, 6, 6, 8, 18, 10, 14,
    4, 2, 4, 6, 8, 4, 2, 6, 12, 10, 2, 4, 2, 4, 6, 12,
    12, 8, 12, 6, 4, 6, 8, 4, 8, 4, 14, 4, 6, 2, 4, 6,
    2, 6, 10, 20, 6, 4, 2, 24, 4, 2, 10, 12, 2, 10, 8, 6,
    6, 6, 18, 6, 4, 2, 12, 10, 12, 8, 16, 14, 6, 4, 2, 4,
    2, 10, 12, 6, 6, 18, 2, 16, 2, 22, 6, 8, 6, 4, 2, 4,
    8, 6, 10, 2, 10, 14, 10, 6, 12, 2, 4, 2, 10, 12, 2, 16,
    2, 6, 4, 2, 10, 8, 18, 24, 4, 6, 8, 16, 2, 4, 8, 16,
    2, 4, 8, 6, 6, 4, 12, 2, 22, 6, 2, 6, 4, 6, 14, 6,
    4, 2, 6, 4, 6, 12, 6, 6, 14, 4, 6, 12, 8, 6, 4, 26,
    18, 10, 8, 4, 6, 2, 6, 22, 12, 2, 16, 8, 4, 12, 14, 10,
    2, 4, 8, 6, 6, 4, 2, 4, 6, 8, 4, 2, 6, 10, 2, 10,
    8, 4, 14, 10, 12, 2, 6, 4, 2, 16, 14, 4, 6, 8, 6, 4,
    18, 8, 10, 6, 6, 8, 10, 12, 14, 4, 6, 6, 2, 28, 2, 10,
    8, 4, 14, 4, 8, 12, 6, 12, 4, 6, 20, 10, 2, 16, 26, 4,
    2, 12, 6, 4, 12, 6, 8, 4, 8, 22, 2, 4, 2, 12, 28, 2,
    6, 6, 6, 4, 6, 2, 12, 4, 12, 2, 10, 2, 16, 2, 16, 6,
    20, 16, 8, 4, 2, 4, 2, 22, 8, 12, 6, 10, 2, 4, 6, 2,
    6, 10, 2, 12, 10, 2, 10, 14, 6, 4, 6, 8, 6, 6, 16, 12,
    2, 4, 14, 6, 4, 8, 10, 8, 6, 6, 22, 6, 2, 10, 14, 4,
    6, 18, 2, 10, 14, 4, 2, 10, 14, 4, 8, 18, 4, 6, 2, 4,
    6, 2, 12, 4, 20, 22, 12, 2, 4, 6, 6, 2, 6, 22, 2, 6,
    16, 6, 12, 2, 6, 12, 16, 2, 4, 6, 14, 4, 2, 18, 24, 10,
    6, 2, 10, 2, 10, 2, 10, 6, 2, 10, 2, 10, 6, 8, 30, 10,
    2, 10, 8, 6, 10, 18, 6, 12, 12, 2, 18, 6, 4, 6, 6, 18,
    2, 10, 14, 6, 4, 2, 4, 24, 2, 12, 6, 16, 8, 6, 6, 18,
    16, 2, 4, 6, 2, 6, 6, 10, 6, 12, 12, 18, 2, 6, 4, 18,
    8, 24, 4, 2, 4, 6, 2, 12, 4, 14, 30, 10, 6, 12, 14, 6,
    10, 12, 2, 4, 6, 8, 6, 10, 2, 4, 14, 6, 6, 4, 6, 2,
    10, 2, 16, 12, 8, 18, 4, 6, 12, 2, 6, 6, 6, 28, 6, 14,
    4, 8, 10, 8, 12, 18, 4, 2, 4, 24, 12, 6, 2, 16, 6, 6,
    14, 10, 14, 4, 30, 6, 6, 6, 8, 6, 4, 2, 12, 6, 4, 2,
    6, 22, 6, 2, 4, 18, 2, 4, 12, 2, 6, 4, 26, 6, 6, 4,
    8, 10, 32, 16, 2, 6, 4, 2, 4, 2, 10, 14, 6, 4, 8, 10,
    6, 20, 4, 2, 6, 30, 4, 8, 10, 6, 6, 8, 6, 12, 4, 6,
    2, 6, 4, 6, 2, 10, 2, 16, 6, 20, 4, 12, 14, 28, 6, 20,
    4, 18, 8, 6, 4, 6, 14, 6, 6, 10, 2, 10, 12, 8, 10, 2,
    10, 8, 12, 10, 24, 2, 4, 8, 6, 4, 8, 18, 10, 6, 6, 2,
    6, 10, 12, 2, 10, 6, 6, 6, 8, 6, 10, 6, 2, 6, 6, 6,
    10, 8, 24, 6, 22, 2, 18, 4, 8, 10, 30, 8, 18, 4, 2, 10,
    6, 2, 6, 4, 18, 8, 12, 18, 16, 6, 2, 12, 6, 10, 2, 10,
    2, 6, 10, 14, 4, 24, 2, 16, 2, 10, 2, 10, 20, 4, 2, 4,
    8, 16, 6, 6, 2, 12, 16, 8, 4, 6, 30, 2, 10, 2, 6, 4,
    6, 6, 8, 6, 4, 12, 6, 8, 12, 4, 14, 12, 10, 24, 6, 12,
    6, 2, 22, 8, 18, 10, 6, 14, 4, 2, 6, 10, 8, 6, 4, 6,
    30, 14, 10, 2, 12, 10, 2, 16, 2, 18, 24, 18, 6, 16, 18, 6,
    2, 18, 4, 6, 2, 10, 8, 10, 6, 6, 8, 4, 6, 2, 10, 2,
    12, 4, 6, 6, 2, 12, 4, 14, 18, 4, 6, 20, 4, 8, 6, 4,
    8, 4, 14, 6, 4, 14, 12, 4, 2, 30, 4, 24, 6, 6, 12, 12,
    14, 6, 4, 2, 4, 18, 6, 12, 8, 6, 4, 12, 2, 12, 30, 16,
    2, 6, 22, 14, 6, 10, 12, 6, 2, 4, 8, 10, 6, 6, 24, 14
};
/* Number of small primes used for trial division before the Miller-Rabin rounds. */
static uint32_t DivisorsCnt(uint32_t bits)
{
    /* candidates up to 1024 bits get a short pass of 128 divisions;
     * larger candidates sweep 1024 entries of the small-prime table */
    return (bits <= 1024) ? 128 : 1024;
}
// Minimum times of checking for Miller-Rabin.
// Each round has an error probability of at most 1/4, so 64 rounds bound the error rate by 2^-128.
static uint32_t MinChecks(uint32_t bits)
{
    /* candidates of 2048 bits or more get 128 rounds, smaller candidates 64 */
    return (bits >= 2048) ? 128 : 64;
}
/* Reduce a BigNum modulo a single limb w, where w < (1 << (BN_UINT_BITS >> 1)). */
static BN_UINT ModLimbHalf(const BN_BigNum *a, BN_UINT w)
{
    BN_UINT remainder = 0;
    uint32_t idx = a->size;
    /* fold in the limbs from most significant to least significant */
    while (idx > 0) {
        idx--;
        MOD_HALF(remainder, remainder, a->data[idx], w);
    }
    return remainder;
}
/* Trial-divide bn by the first DivisorsCnt(BN_Bits(bn)) small primes.
 * Returns CRYPT_SUCCESS when no small prime properly divides bn (or bn IS that prime),
 * CRYPT_BN_NOR_CHECK_PRIME when a proper small-prime factor is found. */
static int32_t LimbCheck(const BN_BigNum *bn)
{
    uint32_t rounds = DivisorsCnt(BN_Bits(bn));
    BN_UINT prime = 2;
    for (uint32_t idx = 0; idx < rounds; idx++) {
        /* materialize the next small prime from the difference table */
        prime += PRIME_DIFF_TABLE[idx];
        if (ModLimbHalf(bn, prime) != 0) {
            continue;
        }
        /* divisible: bn is composite unless it equals the small prime itself */
        return (BN_IsLimb(bn, prime) == false) ? CRYPT_BN_NOR_CHECK_PRIME : CRYPT_SUCCESS;
    }
    return CRYPT_SUCCESS;
}
/* The random number increases by 2 each time, and added for n times,
    so that it is mutually primed to all data in the prime table. */
/*
 * Find the smallest even increment 'inc' such that (rnd + inc) is divisible by none of
 * the small primes whose residues are cached in mods[1..modsLen-1] (mods[i] = rnd mod
 * prime_i, as computed by the caller; mods[0] is never read because rnd is odd, so 2
 * cannot divide it), then apply the increment to rnd.
 * Returns CRYPT_SUCCESS, or CRYPT_BN_NOR_CHECK_PRIME when inc would overflow or the
 * adjusted rnd no longer has its original bit length (the caller then redraws).
 */
static int32_t FillUp(BN_BigNum *rnd, const BN_UINT *mods, uint32_t modsLen)
{
    uint32_t i;
    uint32_t complete = 0;
    uint32_t bits = BN_Bits(rnd); /* remembered to detect a carry past the top bit below */
    uint32_t cnt = modsLen;
    BN_UINT inc = 0;
    while (complete == 0) {
        BN_UINT littlePrime = 2; // the minimum prime = 2
        for (i = 1; i < cnt; i++) {
            /* check whether (rnd + inc) is divisible by the i-th small prime, using cached residues */
            littlePrime += PRIME_DIFF_TABLE[i];
            if ((mods[i] + inc) % littlePrime == 0) {
                inc += 2; // inc increases by 2 each time
                break; /* restart the scan from the first prime with the larger increment */
            }
            if (i == cnt - 1) { // end and exit
                complete = 1;
            }
        }
        if (inc + 2 == 0) { // inc increases by 2 each time. Check whether the inc may overflow.
            BSL_ERR_PUSH_ERROR(CRYPT_BN_NOR_CHECK_PRIME);
            return CRYPT_BN_NOR_CHECK_PRIME;
        }
    }
    int32_t ret = BN_AddLimb(rnd, rnd, inc);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // If the random number length of a prime number is incorrect, generate a new random number.
    if (BN_Bits(rnd) != bits) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_NOR_CHECK_PRIME);
        return CRYPT_BN_NOR_CHECK_PRIME;
    }
    return CRYPT_SUCCESS;
}
/* Generate random numbers that can be mutually primed with the data in the small prime number table. */
/*
 * Draw a random odd candidate of exactly 'bits' bits, adjust it with FillUp() so that no
 * small prime of the table divides it, and — when e != NULL — additionally require that
 * rnd - 1 is invertible mod e, i.e. gcd(rnd - 1, e) == 1 (RSA constraint, FIPS 186-5 A.1.3).
 *
 * @param rnd  [out] candidate prime (sized by the caller)
 * @param e    optional RSA public exponent; may be NULL
 * @param bits exact bit length of the candidate
 * @param half true forces the top two bits to 1 (keeps p*q at full length)
 * @param opt  optimizer supplying scratch big numbers
 * @return CRYPT_SUCCESS, CRYPT_BN_NOR_GEN_PRIME after 100 failed draws, or another error.
 */
static int32_t ProbablePrime(BN_BigNum *rnd, BN_BigNum *e, uint32_t bits, bool half, BN_Optimizer *opt)
{
    const int32_t maxCnt = 100; /* try 100 times */
    int32_t tryCnt = 0;
    uint32_t i;
    int32_t ret;
    uint32_t cnt = DivisorsCnt(bits);
    ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    /* mods->data[i] caches rnd mod prime_i for FillUp */
    BN_BigNum *mods = OptimizerGetBn(opt, cnt);
    if (mods == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    uint32_t top = ((half == true) ? BN_RAND_TOP_TWOBIT : BN_RAND_TOP_ONEBIT);
    do {
        tryCnt++;
        if (tryCnt > maxCnt) {
            /* If it cannot be generated after loop 100 times, a failure message is returned. */
            OptimizerEnd(opt);
            /* In this case, the random number may be incorrect. Keep the error information. */
            BSL_ERR_PUSH_ERROR(CRYPT_BN_NOR_GEN_PRIME);
            return CRYPT_BN_NOR_GEN_PRIME;
        }
        // 'top' can control whether to set the most two significant bits to 1.
        // RSA key generation usually focuses on this parameter to ensure the length of p*q.
        ret = BN_RandEx(opt->libCtx, rnd, bits, top, BN_RAND_BOTTOM_ONEBIT);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            OptimizerEnd(opt);
            return ret;
        }
        BN_UINT littlePrime = 2; // the minimum prime = 2
        // Random number rnd divided by the prime number in the table of small prime numbers, modulo mods.
        for (i = 1; i < cnt; i++) {
            littlePrime += PRIME_DIFF_TABLE[i];
            mods->data[i] = ModLimbHalf(rnd, littlePrime);
        }
        // Check the mods and supplement the rnd.
        ret = FillUp(rnd, mods->data, cnt);
        if (ret != CRYPT_BN_NOR_CHECK_PRIME && ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            OptimizerEnd(opt);
            return ret;
        }
        if (ret != CRYPT_BN_NOR_CHECK_PRIME && e != NULL) {
            // check if rnd-1 and e are coprime
            // reference: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf A.1.3
            BN_BigNum *rnd1 = OptimizerGetBn(opt, BITS_TO_BN_UNIT(bits));
            BN_BigNum *inv = OptimizerGetBn(opt, e->size);
            if (rnd1 == NULL || inv == NULL) {
                OptimizerEnd(opt);
                BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
                return CRYPT_BN_OPTIMIZER_GET_FAIL;
            }
            ret = BN_SubLimb(rnd1, rnd, 1);
            if (ret != CRYPT_SUCCESS) {
                OptimizerEnd(opt);
                BSL_ERR_PUSH_ERROR(ret);
                return ret;
            }
            /* a CRYPT_BN_NOR_CHECK_PRIME-style failure here re-enters the draw loop */
            ret = BN_ModInv(inv, rnd1, e, opt);
        }
    } while (ret == CRYPT_BN_NOR_CHECK_PRIME);
    OptimizerEnd(opt);
    return ret;
}
/* Validate the scratch objects for Miller-Rabin: the four big numbers come from the
 * optimizer pool, the Montgomery context from the heap — each gets its own error code. */
static int32_t BnCheck(const BN_BigNum *bnSubOne, const BN_BigNum *bnSubThree,
    const BN_BigNum *divisor, const BN_BigNum *rnd, const BN_Mont *mont)
{
    bool bnMissing = (bnSubOne == NULL) || (bnSubThree == NULL) ||
        (divisor == NULL) || (rnd == NULL);
    if (bnMissing) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    if (mont == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    return CRYPT_SUCCESS;
}
/* Draw a Miller-Rabin witness: sample uniformly from [0, bn - 3) and shift by 2,
 * giving 2 <= rnd < bn - 1. */
static int32_t GenRnd(void *libCtx, BN_BigNum *rnd, const BN_BigNum *bnSubThree)
{
    int32_t ret = BN_RandRangeEx(libCtx, rnd, bnSubThree);
    if (ret == CRYPT_SUCCESS) {
        return BN_AddLimb(rnd, rnd, 2); /* bn - 3 + 2 = bn - 1 */
    }
    BSL_ERR_PUSH_ERROR(ret);
    return ret;
}
/* When sum is 1 or bn - 1, every further modular squaring yields 1:
 * normalize sum to 1 and report true so the caller can stop squaring early. */
static bool SumCorrect(BN_BigNum *sum, const BN_BigNum *bnSubOne)
{
    bool trivial = BN_IsOne(sum) || (BN_Cmp(sum, bnSubOne) == 0);
    if (!trivial) {
        return false;
    }
    (void)BN_SetLimb(sum, 1);
    return true;
}
/*
 * Core Miller-Rabin witness loop for bn, with bn - 1 = divisor * 2^p (divisor odd).
 * Runs 'checkTimes' rounds (0 selects MinChecks(BN_Bits(bn))): each round draws a
 * random witness, computes witness^divisor mod bn, and squares up to p times,
 * applying the Miller-Rabin and Fermat contrapositives to detect compositeness.
 *
 * NOTE: sum aliases rnd, so the witness value is consumed by the first exponentiation.
 *
 * @return CRYPT_SUCCESS when every witness passes (bn is probably prime),
 *         CRYPT_BN_NOR_CHECK_PRIME when a witness proves bn composite,
 *         or another error from the arithmetic / progress callback.
 */
int32_t MillerRabinCheckCore(const BN_BigNum *bn, BN_Mont *mont, BN_BigNum *rnd,
    const BN_BigNum *divisor, const BN_BigNum *bnSubOne, const BN_BigNum *bnSubThree,
    uint32_t p, uint32_t checkTimes, BN_Optimizer *opt, BN_CbCtx *cb)
{
    uint32_t i, j;
    int32_t ret = CRYPT_SUCCESS;
    uint32_t checks = (checkTimes == 0) ? MinChecks(BN_Bits(bn)) : checkTimes;
    BN_BigNum *sum = rnd; /* sum and rnd share storage */
    for (i = 0; i < checks; i++) {
        // 3.1 Generate a random number rnd, 2 < rnd < n-1
        ret = GenRnd(opt->libCtx, rnd, bnSubThree);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        // 3.2 Calculate base = rnd^divisor mod bn
        ret = BN_MontExp(sum, rnd, divisor, mont, opt);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        for (j = 0; j < p; j++) {
            // If sum is equal to 1 or bn-1, the modulus square result must be 1. Exit directly.
            if (SumCorrect(sum, bnSubOne)) {
                break;
            }
            // sum < bn
            ret = MontSqrCore(sum, sum, mont, opt);
            if (ret != CRYPT_SUCCESS) {
                BSL_ERR_PUSH_ERROR(ret);
                return ret;
            }
            // Inverse negation of Miller Rabin's theorem, if equal to 1, bn is not a prime number.
            if (BN_IsOne(sum)) {
                ret = CRYPT_BN_NOR_CHECK_PRIME;
                return ret;
            }
        }
        // 3.4 Fermat's little theorem inverse negation if sum = rnd^(bn -1) != 1 mod bn, bn is not a prime number.
        if (!BN_IsOne(sum)) {
            ret = CRYPT_BN_NOR_CHECK_PRIME;
            return ret;
        }
#ifdef HITLS_CRYPTO_BN_CB
        /* progress/abort callback between rounds */
        ret = BN_CbCtxCall(cb, 0, 0);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
#else
        (void)cb;
#endif
    }
    return ret;
}
/* Derive the two helpers used throughout the test: bnSubOne = bn - 1, bnSubThree = bn - 3. */
static int32_t BnSubGet(BN_BigNum *bnSubOne, BN_BigNum *bnSubThree, const BN_BigNum *bn)
{
    int32_t ret = BN_SubLimb(bnSubOne, bn, 1); /* bn - 1 */
    if (ret == CRYPT_SUCCESS) {
        return BN_SubLimb(bnSubThree, bn, 3); /* bn - 3 */
    }
    BSL_ERR_PUSH_ERROR(ret);
    return ret;
}
/* Report CRYPT_SUCCESS when bn is exactly 2 or 3 (trivially prime); flag anything else. */
static int32_t PrimeLimbCheck(const BN_BigNum *bn)
{
    bool tinyPrime = BN_IsLimb(bn, 2) || BN_IsLimb(bn, 3); /* 2 and 3 are prime by definition */
    if (!tinyPrime) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_NOR_CHECK_PRIME);
        return CRYPT_BN_NOR_CHECK_PRIME;
    }
    return CRYPT_SUCCESS;
}
/* Index of the lowest set bit of bn, i.e. the power of 2 dividing bn.
 * The caller must guarantee bn != 0, otherwise the scan would not terminate. */
static uint32_t GetP(const BN_BigNum *bn)
{
    uint32_t bitPos;
    for (bitPos = 0; !BN_GetBit(bn, bitPos); bitPos++) {
    }
    return bitPos;
}
// CRYPT_SUCCESS is returned for a prime number,
// and CRYPT_BN_NOR_CHECK_PRIME is returned for a non-prime number. Other error codes are returned.
/*
 * Miller-Rabin primality test driver: handles 2/3 and even numbers up front, then
 * writes bn - 1 = divisor * 2^p and delegates to MillerRabinCheckCore() with
 * 'checkTimes' witnesses (0 selects the per-length default).
 * Scratch numbers come from the optimizer frame; the Montgomery context is
 * heap-allocated and released on all paths via the err label.
 */
static int32_t MillerRabinPrimeVerify(const BN_BigNum *bn, uint32_t checkTimes, BN_Optimizer *opt, BN_CbCtx *cb)
{
    int32_t ret = CRYPT_SUCCESS;
    uint32_t p;
    if (PrimeLimbCheck(bn) == CRYPT_SUCCESS) { /* 2 and 3 directly determine that the number is a prime number. */
        return CRYPT_SUCCESS;
    }
    if (!BN_GetBit(bn, 0)) { // even
        BSL_ERR_PUSH_ERROR(CRYPT_BN_NOR_CHECK_PRIME);
        return CRYPT_BN_NOR_CHECK_PRIME;
    }
    ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    BN_BigNum *bnSubOne = OptimizerGetBn(opt, bn->size); // bnSubOne = bn - 1
    BN_BigNum *bnSubThree = OptimizerGetBn(opt, bn->size); // bnSubThree = bn - 3
    BN_BigNum *divisor = OptimizerGetBn(opt, bn->size); // divisor = bnSubOne / 2^p
    BN_BigNum *rnd = OptimizerGetBn(opt, bn->size); // rnd to verify bn
    BN_Mont *mont = BN_MontCreate(bn);
    ret = BnCheck(bnSubOne, bnSubThree, divisor, rnd, mont);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto err;
    }
    ret = BnSubGet(bnSubOne, bnSubThree, bn);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto err;
    }
    // 1. Extract the power p of factor 2 in bnSubOne.
    p = GetP(bnSubOne);
    // 2. Number after factor 2 is extracted by bnSubOne. divisor = (bn - 1) / 2^p
    ret = BN_Rshift(divisor, bnSubOne, p);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto err;
    }
    ret = MillerRabinCheckCore(bn, mont, rnd, divisor, bnSubOne, bnSubThree, p, checkTimes, opt, cb);
err:
    BN_MontDestroy(mont);
    OptimizerEnd(opt);
    return ret;
}
// CRYPT_SUCCESS is returned for a prime number,
// and CRYPT_BN_NOR_CHECK_PRIME is returned for a non-prime number. Other error codes are returned.
/* Public entry: reject NULL/0/1/negative inputs, trial-divide, then run Miller-Rabin. */
int32_t BN_PrimeCheck(const BN_BigNum *bn, uint32_t checkTimes, BN_Optimizer *opt, BN_CbCtx *cb)
{
    if (bn == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    /* neither 0 nor 1 is prime */
    if (BN_IsZero(bn) || BN_IsOne(bn)) {
        return CRYPT_BN_NOR_CHECK_PRIME;
    }
    /* negative numbers are not prime */
    if (bn->sign == 1) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_NOR_CHECK_PRIME);
        return CRYPT_BN_NOR_CHECK_PRIME;
    }
    /* cheap small-prime trial division weeds out most composites */
    int32_t ret = LimbCheck(bn);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
#ifdef HITLS_CRYPTO_BN_CB
    /* give the caller a chance to abort before the expensive test */
    ret = BN_CbCtxCall(cb, 0, 0);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
#endif
    return MillerRabinPrimeVerify(bn, checkTimes, opt, cb);
}
/*
 * Generate a prime of exactly 'bits' bits (2 <= bits < 13) by uniformly picking one of
 * the primes of that length from PRIME_DIFF_TABLE.
 * base[bits - 2] / cnt[bits - 2] select the slice of candidate primes for that bit
 * length; 'half' restricts the choice to primes whose top two bits are 1.
 * NOTE(review): base/cnt look like precomputed offsets/counts into PRIME_DIFF_TABLE —
 * verify against the table before changing either array.
 */
static int32_t GenPrimeLimb(BN_BigNum *bn, uint32_t bits, bool half, BN_Optimizer *opt)
{
    const BN_UINT baseAll[11] = {0, 2, 4, 6, 11, 18, 31, 54, 97, 172, 309};
    const BN_UINT cntAll[11] = {2, 2, 2, 5, 7, 13, 23, 43, 75, 137, 255};
    const BN_UINT baseHalf[11] = {1, 3, 5, 9, 15, 24, 43, 76, 135, 242, 439};
    const BN_UINT cntHalf[11] = {1, 1, 1, 2, 3, 7, 11, 21, 37, 67, 125};
    const BN_UINT *base = baseAll;
    const BN_UINT *cnt = cntAll;
    if (half == true) {
        base = baseHalf;
        cnt = cntHalf;
    }
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_BigNum *bnCnt = OptimizerGetBn(opt, BITS_TO_BN_UNIT(bits));
    BN_BigNum *bnRnd = OptimizerGetBn(opt, BITS_TO_BN_UNIT(bits));
    if (bnCnt == NULL || bnRnd == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    (void)BN_SetLimb(bnCnt, cnt[bits - 2]); /* offset, the minimum bit of the interface is 2. */
    /* uniform index in [0, cnt) over the candidate slice */
    ret = BN_RandRangeEx(opt->libCtx, bnRnd, bnCnt);
    if (ret != CRYPT_SUCCESS) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_UINT rnd = bnRnd->data[0] + base[bits - 2]; /* offset, the minimum bit of the interface is 2. */
    OptimizerEnd(opt);
    BN_UINT littlePrime = 2;
    /* walk the difference table to materialize the selected prime */
    for (BN_UINT i = 1; i <= rnd; i++) {
        littlePrime += PRIME_DIFF_TABLE[i];
    }
    return BN_SetLimb(bn, littlePrime);
}
/* Validate the prime-generation arguments and make sure bn has room for 'bits' bits. */
static int32_t GenCheck(BN_BigNum *bn, uint32_t bits, const BN_Optimizer *opt)
{
    if (bn == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    /* with fewer than 2 bits the only values are 0 and 1, neither of which is prime */
    if (bits < 2) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_NOR_CHECK_PRIME);
        return CRYPT_BN_NOR_CHECK_PRIME;
    }
    return BnExtend(bn, BITS_TO_BN_UNIT(bits));
}
// If the prime number r is generated successfully, CRYPT_SUCCESS is returned.
// If the prime number r fails to be generated, CRYPT_BN_NOR_GEN_PRIME is returned. Other error codes are returned.
// If half is 1, the prime number whose two most significant bits are 1 is generated.
/*
 * Generate a random probable prime of exactly 'bits' bits into r.
 * e (optional) is an RSA public exponent: candidates whose (candidate - 1) is not
 * coprime to e are rejected inside ProbablePrime. Lengths below 13 bits are served
 * directly from the small-prime table. The candidate loop is bounded either by the
 * user callback (HITLS_CRYPTO_BN_CB) or by a fixed 256 rounds.
 */
int32_t BN_GenPrime(BN_BigNum *r, BN_BigNum *e, uint32_t bits, bool half, BN_Optimizer *opt, BN_CbCtx *cb)
{
    int32_t time = 0;
#ifndef HITLS_CRYPTO_BN_CB
    (void)cb;
    const int32_t maxTime = 256; /* The maximum number of cycles is 256. If no prime number is generated after the
                                  * maximum number of cycles, the operation fails. */
#endif
    int32_t ret = GenCheck(r, bits, opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    if (bits < 13) { // < 13 is limited by the small prime table of 1024 size.
        return GenPrimeLimb(r, bits, half, opt);
    }
    ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    /* To preventing insufficient space in addition operations when the rnd is constructed. */
    BN_BigNum *rnd = OptimizerGetBn(opt, BITS_TO_BN_UNIT(bits) + 1);
    if (rnd == NULL) {
        OptimizerEnd(opt);
        BSL_ERR_PUSH_ERROR(CRYPT_BN_OPTIMIZER_GET_FAIL);
        return CRYPT_BN_OPTIMIZER_GET_FAIL;
    }
    do {
#ifdef HITLS_CRYPTO_BN_CB
        if (BN_CbCtxCall(cb, time, 0) != CRYPT_SUCCESS) {
#else
        if (time == maxTime) {
#endif
            OptimizerEnd(opt);
            BSL_ERR_PUSH_ERROR(CRYPT_BN_NOR_GEN_PRIME);
            return CRYPT_BN_NOR_GEN_PRIME;
        }
        // Generate a random number bn that may be a prime.
        ret = ProbablePrime(rnd, e, bits, half, opt);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            OptimizerEnd(opt);
            return ret;
        }
        ret = MillerRabinPrimeVerify(rnd, 0, opt, cb);
        time++;
    } while (ret != CRYPT_SUCCESS);
    OptimizerEnd(opt);
    return BN_Copy(r, rnd);
}
#endif /* HITLS_CRYPTO_BN_PRIME */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_prime.c | C | unknown | 22,286 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN_RAND
#include <stdint.h>
#include "securec.h"
#include "bsl_err_internal.h"
#include "bsl_sal.h"
#include "crypt_errno.h"
#include "bn_basic.h"
#include "bn_bincal.h"
#include "crypt_util_rand.h"
/*
 * Fill r with a uniformly random value of at most 'bits' bits: draw
 * BN_BITS_TO_BYTES(bits) random bytes, import them into r, then mask the excess
 * high bits of the top limb so BN_Bits(r) <= bits.
 * The byte buffer holding the raw randomness is wiped before being freed.
 * r must already have room for BITS_TO_BN_UNIT(bits) limbs (callers extend it first).
 */
static int32_t RandGenerate(void *libCtx, BN_BigNum *r, uint32_t bits)
{
    int32_t ret;
    uint32_t room = BITS_TO_BN_UNIT(bits);
    BN_UINT mask;
    // Maxbits = (1 << 29) --> MaxBytes = (1 << 26), hence BN_BITS_TO_BYTES(bits) will not exceed the upper limit.
    uint32_t byteSize = BN_BITS_TO_BYTES(bits);
    uint8_t *buf = BSL_SAL_Malloc(byteSize);
    if (buf == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    ret = CRYPT_RandEx(libCtx, buf, byteSize);
    if (ret == CRYPT_NO_REGIST_RAND) { /* "no RNG registered" is propagated unchanged */
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
    if (ret != CRYPT_SUCCESS) { /* any other RNG failure is normalized to a generation failure */
        BSL_ERR_PUSH_ERROR(CRYPT_BN_RAND_GEN_FAIL);
        ret = CRYPT_BN_RAND_GEN_FAIL;
        goto ERR;
    }
    ret = BN_Bin2Bn(r, buf, byteSize);
    BSL_SAL_CleanseData(buf, byteSize); /* wipe the raw randomness regardless of the conversion result */
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
    /* clear the bits above 'bits' in the top limb; all-ones mask when bits is a limb multiple */
    mask = (BN_UINT)(-1) >> ((BN_UINT_BITS - bits % BN_UINT_BITS) % BN_UINT_BITS);
    r->data[room - 1] &= mask;
    r->size = BinFixSize(r->data, room);
ERR:
    BSL_SAL_FREE(buf);
    return ret;
}
/* Validate the top/bottom bit-forcing options against the requested bit length. */
static int32_t CheckTopAndBottom(uint32_t bits, uint32_t top, uint32_t bottom)
{
    /* neither option may exceed its TWOBIT variant */
    if ((top > BN_RAND_TOP_TWOBIT) || (bottom > BN_RAND_BOTTOM_TWOBIT)) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_RAND_TOP_BOTTOM);
        return CRYPT_BN_ERR_RAND_TOP_BOTTOM;
    }
    /* cannot force more bits than the number has */
    if ((top > bits) || (bottom > bits)) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_RAND_BITS_NOT_ENOUGH);
        return CRYPT_BN_ERR_RAND_BITS_NOT_ENOUGH;
    }
    return CRYPT_SUCCESS;
}
/* Generate a 'bits'-bit random number using the default library context; see BN_RandEx. */
int32_t BN_Rand(BN_BigNum *r, uint32_t bits, uint32_t top, uint32_t bottom)
{
    return BN_RandEx(NULL, r, bits, top, bottom);
}
/*
 * Generate a random number of 'bits' bits into r.
 * 'top' forces the most significant one or two bits to 1 (BN_RAND_TOP_*);
 * 'bottom' forces the least significant one or two bits to 1 (BN_RAND_BOTTOM_*).
 * libCtx selects the random source; NULL uses the library default.
 * bits == 0 zeroizes r; bits above BN_MAX_BITS are rejected.
 */
int32_t BN_RandEx(void *libCtx, BN_BigNum *r, uint32_t bits, uint32_t top, uint32_t bottom)
{
    if (r == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    int32_t ret = CheckTopAndBottom(bits, top, bottom);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    if (bits == 0) {
        return BN_Zeroize(r);
    }
    if (bits > BN_MAX_BITS) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_BITS_TOO_MAX);
        return CRYPT_BN_BITS_TOO_MAX;
    }
    ret = BnExtend(r, BITS_TO_BN_UNIT(bits));
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    ret = RandGenerate(libCtx, r, bits);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    /* force the requested low bits */
    r->data[0] |= (bottom == BN_RAND_BOTTOM_TWOBIT) ? 0x3 : (BN_UINT)bottom; // CheckTopAndBottom ensure that bottom>0
    if (top == BN_RAND_TOP_ONEBIT) {
        (void)BN_SetBit(r, bits - 1);
    } else if (top == BN_RAND_TOP_TWOBIT) {
        (void)BN_SetBit(r, bits - 1);
        (void)BN_SetBit(r, bits - 2); /* the most significant 2 bits are 1 */
    }
    r->size = BinFixSize(r->data, r->room);
    return ret;
}
/* Check the range-sampling arguments: non-NULL pointers, a positive non-zero upper
 * bound p, and an r large enough to hold any value below p. */
static int32_t InputCheck(BN_BigNum *r, const BN_BigNum *p)
{
    if (r == NULL || p == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (BN_IsZero(p)) { /* the range [0, 0) is empty and cannot be sampled */
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_RAND_ZERO);
        return CRYPT_BN_ERR_RAND_ZERO;
    }
    if (p->sign) { /* the range bound must be positive */
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_RAND_NEGATIVE);
        return CRYPT_BN_ERR_RAND_NEGATIVE;
    }
    return BnExtend(r, p->size);
}
/* Sample r uniformly from [0, p) using the default library context; see BN_RandRangeEx. */
int32_t BN_RandRange(BN_BigNum *r, const BN_BigNum *p)
{
    return BN_RandRangeEx(NULL, r, p);
}
/* Sample r uniformly from [0, p) by rejection: draw BN_Bits(p)-bit values until r < p. */
int32_t BN_RandRangeEx(void *libCtx, BN_BigNum *r, const BN_BigNum *p)
{
    const int32_t retryLimit = 100; /* try 100 times */
    int32_t ret = InputCheck(r, p);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    ret = BN_Zeroize(r);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    if (BN_IsOne(p)) {
        /* the only value in [0, 1) is 0, which r already holds */
        return CRYPT_SUCCESS;
    }
    uint32_t bits = BN_Bits(p);
    int32_t attempt = 0;
    for (;;) {
        attempt++;
        if (attempt > retryLimit) {
            /* each draw succeeds with probability above one half, so 100 misses
             * indicate a broken random source — report a generation failure */
            BSL_ERR_PUSH_ERROR(CRYPT_BN_RAND_GEN_FAIL);
            return CRYPT_BN_RAND_GEN_FAIL;
        }
        ret = RandGenerate(libCtx, r, bits);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        if (BinCmp(r->data, r->size, p->data, p->size) < 0) {
            return ret; /* accepted: r < p */
        }
    }
}
#endif /* HITLS_CRYPTO_BN_RAND */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_rand.c | C | unknown | 5,406 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_ECC
#include <stdint.h>
#include <stdbool.h>
#include "securec.h"
#include "bsl_err_internal.h"
#include "bsl_sal.h"
#include "crypt_errno.h"
#include "crypt_utils.h"
#include "bn_optimizer.h"
#include "crypt_bn.h"
/* Index of the lowest set bit of bn (the power of 2 dividing bn); bn must be nonzero. */
static uint32_t GetExp(const BN_BigNum *bn)
{
    uint32_t shift;
    for (shift = 0; !BN_GetBit(bn, shift); shift++) {
    }
    return shift;
}
// p does not perform prime number check, but performs parity check.
/* Require: p odd and greater than 1, p and a non-negative, and a < p. All violations
 * are reported with the same CRYPT_BN_ERR_SQRT_PARA code. */
static int32_t CheckParam(const BN_BigNum *a, const BN_BigNum *p)
{
    /* p must be an odd number greater than 1 */
    bool invalid = BN_IsZero(p) || BN_IsOne(p) || (!BN_GetBit(p, 0));
    /* p and a must be non-negative, and a must be smaller than p */
    invalid = invalid || p->sign || a->sign || (BN_Cmp(p, a) <= 0);
    if (invalid) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_ERR_SQRT_PARA);
        return CRYPT_BN_ERR_SQRT_PARA;
    }
    return CRYPT_SUCCESS;
}
// r = +- a^((p + 1)/4)
/*
 * Direct square root for the s == 1 case (p - 1 = 2 * odd, i.e. p mod 4 == 3):
 * r = a^((p + 1) / 4) mod p. The caller verifies r afterwards, since a may turn
 * out to be a non-residue.
 */
static int32_t CalculationRoot(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *p, BN_Mont *mont, BN_Optimizer *opt)
{
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_BigNum *temp = OptimizerGetBn(opt, p->size);
    if (temp == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        ret = CRYPT_MEM_ALLOC_FAIL;
        goto ERR;
    }
    GOTO_ERR_IF_EX(BN_AddLimb(temp, p, 1), ret); // p + 1
    GOTO_ERR_IF_EX(BN_Rshift(temp, temp, 2), ret); // (p + 1) / 4 = (p + 1) >> 2
    GOTO_ERR_IF(BN_MontExp(r, a, temp, mont, opt), ret);
ERR:
    OptimizerEnd(opt);
    return ret;
}
/* Verify that both scratch big numbers were successfully obtained from the optimizer pool. */
static int32_t LegendreFastTempDataCheck(const BN_BigNum *a, const BN_BigNum *pp)
{
    if (a != NULL && pp != NULL) {
        return CRYPT_SUCCESS;
    }
    BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
    return CRYPT_MEM_ALLOC_FAIL;
}
/*
 * Compute the Legendre symbol (z | p) via the binary Jacobi algorithm (quadratic
 * reciprocity plus the supplementary law for 2) instead of the z^((p-1)/2) mod p
 * exponentiation. The result — 1, -1, or 0 (when z and p are not coprime) — is
 * written to *legendre. Scratch copies of z and p come from the optimizer frame.
 * NOTE(review): z is declared non-const but is only read here (copied into scratch).
 */
int32_t LegendreFast(BN_BigNum *z, const BN_BigNum *p, int32_t *legendre, BN_Optimizer *opt)
{
    int32_t l = 1;
    BN_BigNum *temp = NULL;
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_BigNum *a = OptimizerGetBn(opt, p->size); // The variable has been checked for NULL in BN_Copy.
    BN_BigNum *pp = OptimizerGetBn(opt, p->size);
    GOTO_ERR_IF(LegendreFastTempDataCheck(a, pp), ret);
    if (BN_IsOne(z)) {
        *legendre = 1;
        goto ERR;
    }
    if (BN_IsZero(z)) {
        *legendre = 0;
        goto ERR;
    }
    GOTO_ERR_IF_EX(BN_Copy(a, z), ret);
    GOTO_ERR_IF_EX(BN_Copy(pp, p), ret);
    while (true) {
        if (BN_IsZero(a)) {
            *legendre = BN_IsOne(pp) ? l : 0; /* gcd(z, p) == pp at this point */
            break;
        }
        // Theorem: p is an odd prime number, a and b are numbers that are not divisible by p. (a|p)(b|p) = (ab|p)
        // a = aa * 2^exp
        // (a|pp) = (2|pp)^exp * (aa|pp)
        // If exp is an even number, (a|pp) = (aa|pp)
        uint32_t exp = GetExp(a);
        GOTO_ERR_IF_EX(BN_Rshift(a, a, exp), ret);
        if ((exp & 1) != 0) {
            // pp = +- 1 mod 8, 2 is its quadratic remainder. pp = +-3 mod 8, 2 is its non-quadric remainder.
            if ((pp->data[0] & 1) != 0) {
                // pp->data[0] % 8 = pp->data[0] & 7
                // pp = +- 1 mod 8 = 7 or 1 mod
                l = ((pp->data[0] & 7) == 1 || (pp->data[0] & 7) == 7) ? l : -l;
            } else {
                l = 0;
            }
        }
        // pp->data[0] % 4 = pp->data[0] & 3
        // K(a|pp) = K(pp|a) * (-1)^((a-1)*(pp-1)/4)
        // (a-1)*(pp-1)/4 is an even number only when at least one of A and P mod 4 = 1;
        // if both A and P mod 4 = 3, (a-1)*(pp-1)/4 is an odd number.
        if (((pp->data[0] & 3) == 3) && ((a->data[0] & 3) == 3)) {
            l = -l;
        }
        // K(pp|a) = K(pp%a|a), swap(a,pp)
        GOTO_ERR_IF_EX(BN_Div(NULL, pp, pp, a, opt), ret);
        temp = a;
        a = pp;
        pp = temp;
    }
    ret = CRYPT_SUCCESS;
ERR:
    OptimizerEnd(opt);
    return ret;
}
// Find z so that legendre(z / p) = z^((p-1)/2) mod p != 1
/*
 * Randomly sample z in [1, p) until a quadratic non-residue of p (Legendre symbol -1)
 * is found. About half of all candidates qualify for prime p, so the 50-attempt budget
 * fails only with negligible probability (or with a broken RNG / non-prime p).
 *
 * Fixes versus the previous revision:
 *  - the exhaustion path now records CRYPT_BN_ERR_LEGENDE_DATA via BSL_ERR_PUSH_ERROR,
 *    consistent with every other failure path in this file;
 *  - removed a scratch big number exp = (p - 1) / 2 that was computed but never used.
 *
 * @param z   [out] receives the non-residue on success
 * @param p   odd prime modulus
 * @param opt optimizer providing scratch space for LegendreFast
 * @return CRYPT_SUCCESS, CRYPT_BN_ERR_LEGENDE_DATA on exhaustion, or an underlying error.
 */
static int32_t GetLegendreZ(BN_BigNum *z, const BN_BigNum *p, BN_Optimizer *opt)
{
    uint32_t maxCnt = 50; // A random number can be generated cyclically for a maximum of 50 times.
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    int32_t legendre;
    while (maxCnt > 0) {
        GOTO_ERR_IF_EX(BN_RandRangeEx(opt->libCtx, z, p), ret);
        maxCnt--;
        if (BN_IsZero(z)) {
            continue; // 0 has no Legendre symbol; redraw
        }
        GOTO_ERR_IF_EX(LegendreFast(z, p, &legendre, opt), ret);
        if (legendre == -1) {
            ret = CRYPT_SUCCESS;
            goto ERR;
        }
    }
    ret = CRYPT_BN_ERR_LEGENDE_DATA;
    BSL_ERR_PUSH_ERROR(ret);
ERR:
    OptimizerEnd(opt);
    return ret;
}
/* Initial Tonelli-Shanks root guess: r = a^((q + 1) / 2) mod p, where p - 1 = q * 2^s. */
static int32_t SetParaR(BN_BigNum *r, BN_BigNum *q, const BN_BigNum *a, BN_Mont *mont, BN_Optimizer *opt)
{
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_BigNum *temp = OptimizerGetBn(opt, q->size);
    if (temp == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        ret = CRYPT_MEM_ALLOC_FAIL;
        goto ERR;
    }
    GOTO_ERR_IF_EX(BN_AddLimb(temp, q, 1), ret); // q + 1
    GOTO_ERR_IF_EX(BN_Rshift(temp, temp, 1), ret); // (q + 1) / 2
    GOTO_ERR_IF(BN_MontExp(r, a, temp, mont, opt), ret); // r = a^((q+1)/2) mod p
ERR:
    OptimizerEnd(opt);
    return ret;
}
/*
 * Main Tonelli-Shanks loop. On entry: r = a^((q+1)/2), c = z^q (z a non-residue),
 * t = a^q, with p - 1 = q * 2^s. Each iteration reduces the 2-order m of t until
 * t == 1, at which point r holds a square root of a mod p.
 * Returns CRYPT_BN_ERR_NO_SQUARE_ROOT when a turns out to be a non-residue.
 */
static int32_t TonelliShanksCalculation(BN_BigNum *r, BN_BigNum *c, BN_BigNum *t,
    uint32_t s, const BN_BigNum *p, BN_Optimizer *opt)
{
    uint32_t m = s;
    uint32_t i, j;
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_BigNum *b = OptimizerGetBn(opt, p->size);
    BN_BigNum *tempT = OptimizerGetBn(opt, p->size);
    if (b == NULL || tempT == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        ret = CRYPT_MEM_ALLOC_FAIL;
        goto ERR;
    }
    while (!BN_IsOne(t)) {
        // Find an i (0 < i < s) so that t^(2^i) = 1
        i = 1;
        // repeat modulus square
        GOTO_ERR_IF_EX(BN_ModSqr(tempT, t, p, opt), ret);
        while (!BN_IsOne(tempT)) {
            i++;
            if (i >= m) { /* no such i exists: a is not a quadratic residue */
                ret = CRYPT_BN_ERR_NO_SQUARE_ROOT;
                BSL_ERR_PUSH_ERROR(ret);
                goto ERR;
            }
            GOTO_ERR_IF_EX(BN_ModSqr(tempT, tempT, p, opt), ret);
        }
        // b = c^(2^(m-i-1)), if m-i-1 == 0, b = c
        GOTO_ERR_IF_EX(BN_Copy(b, c), ret);
        for (j = m - i - 1; j > 0; j--) {
            GOTO_ERR_IF_EX(BN_ModSqr(b, b, p, opt), ret);
        }
        GOTO_ERR_IF_EX(BN_ModMul(r, r, b, p, opt), ret); // r = r * b
        GOTO_ERR_IF_EX(BN_ModSqr(c, b, p, opt), ret); // c = b*b
        GOTO_ERR_IF_EX(BN_ModMul(t, t, c, p, opt), ret); // t = t * b * b = t * c
        m = i;
    }
    ret = CRYPT_SUCCESS;
ERR:
    OptimizerEnd(opt);
    return ret;
}
/* Confirm the candidate root: check that r^2 mod p equals a; otherwise a has no root. */
static int32_t SqrtVerify(
    BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *p, BN_Optimizer *opt)
{
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_BigNum *squared = OptimizerGetBn(opt, p->size);
    if (squared == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        ret = CRYPT_MEM_ALLOC_FAIL;
        goto ERR;
    }
    GOTO_ERR_IF_EX(BN_ModSqr(squared, r, p, opt), ret);
    if (BN_Cmp(squared, a) != 0) {
        /* a is not a quadratic residue mod p */
        ret = CRYPT_BN_ERR_NO_SQUARE_ROOT;
        BSL_ERR_PUSH_ERROR(ret);
    }
ERR:
    OptimizerEnd(opt);
    return ret;
}
/* Ensure all five optimizer-pooled scratch numbers for BN_ModSqrt were obtained. */
static int32_t BN_ModSqrtTempDataCheck(const BN_BigNum *pSubOne, const BN_BigNum *q,
    const BN_BigNum *z, const BN_BigNum *c, const BN_BigNum *t)
{
    bool anyMissing = (pSubOne == NULL) || (q == NULL) ||
        (z == NULL) || (c == NULL) || (t == NULL);
    if (!anyMissing) {
        return CRYPT_SUCCESS;
    }
    BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
    return CRYPT_MEM_ALLOC_FAIL;
}
/* 1. Input parameters a and p. p is an odd prime number, and a is an integer (0 <= a <= p-1)
   2. For P-1 processing, let p-1 = q * 2^s
   3. If s=1,r = a^((p + 1)/4)
   4. Randomly select z (1<= z <= p-1) so that the Legendre symbol of z to p equals -1. (z, p) = 1, (z/p) = a^((p-1)/2)
   5. Setting c = z^q, r = a^((q+1)/2), t = a^q, m = s
   6. Circulation
      1) If t = 1, return r.
      2) Find an i (0 < i < m) so that t^(2^i) = 1.
      3) b = c^(2^(m-i-1)), r = r * b, t = t*b*b, c = b*b, m = i
   7. Verification */
/*
 * Modular square root (Tonelli-Shanks): r = sqrt(a) mod p for odd prime p.
 * Returns CRYPT_SUCCESS, CRYPT_BN_ERR_SQRT_PARA on bad parameters,
 * CRYPT_BN_ERR_NO_SQUARE_ROOT when a is a non-residue, or another error code.
 */
int32_t BN_ModSqrt(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *p, BN_Optimizer *opt)
{
    if (r == NULL || a == NULL || p == NULL || opt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    int32_t ret = OptimizerStart(opt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    uint32_t s = 0;
    BN_Mont *mont = NULL;
    BN_BigNum *pSubOne = OptimizerGetBn(opt, p->size);
    BN_BigNum *q = OptimizerGetBn(opt, p->size);
    BN_BigNum *z = OptimizerGetBn(opt, p->size);
    BN_BigNum *c = OptimizerGetBn(opt, p->size);
    BN_BigNum *t = OptimizerGetBn(opt, p->size);
    GOTO_ERR_IF(BN_ModSqrtTempDataCheck(pSubOne, q, z, c, t), ret);
    GOTO_ERR_IF_EX(CheckParam(a, p), ret);
    if (BN_IsZero(a) || BN_IsOne(a)) {
        /* 0 and 1 are their own square roots */
        GOTO_ERR_IF_EX(BN_Copy(r, a), ret);
        goto VERIFY;
    }
    mont = BN_MontCreate(p);
    if (mont == NULL) {
        ret = CRYPT_MEM_ALLOC_FAIL;
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
    GOTO_ERR_IF_EX(BN_SubLimb(pSubOne, p, 1), ret);
    s = GetExp(pSubOne); // Obtains the power s of factor 2 in p-1.
    GOTO_ERR_IF_EX(BN_Rshift(q, pSubOne, s), ret); // p - 1 = q * 2^s
    if (s == 1) {
        // s==1,r = +- n^((p + 1)/4)
        GOTO_ERR_IF_EX(CalculationRoot(r, a, p, mont, opt), ret);
        goto VERIFY;
    }
    // Randomly select z(1<= z <= p-1), so that the Legendre symbol of z to p equals -1. (z, p) = 1, (z/p) = a^((p-1)/2)
    GOTO_ERR_IF(GetLegendreZ(z, p, opt), ret);
    GOTO_ERR_IF(BN_MontExp(c, z, q, mont, opt), ret); // c = z^q mod p
    GOTO_ERR_IF(BN_MontExp(t, a, q, mont, opt), ret); // t = a^q mod p
    GOTO_ERR_IF_EX(SetParaR(r, q, a, mont, opt), ret); // r = a^((q+1)/2) mod p
    // Circulation
    // 1) If t = 1, return r.
    // 2) Find an i (0 < i < m) so that t^(2^i) = 1
    // 3) b = c^(2^(m-i-1)), r = r * b, t = t*b*b, c = b*b, m = i
    GOTO_ERR_IF_EX(TonelliShanksCalculation(r, c, t, s, p, opt), ret);
VERIFY:
    GOTO_ERR_IF_EX(SqrtVerify(r, a, p, opt), ret);
ERR:
    OptimizerEnd(opt);
    BN_MontDestroy(mont);
    return ret;
}
#endif /* HITLS_CRYPTO_ECC */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_sqrt.c | C | unknown | 11,863 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "securec.h"
#include "bn_bincal.h"
#include "crypt_errno.h"
#include "bsl_err_internal.h"
/* the user should guaranteed a.size >= b.size */
/*
 * Unsigned BigNum subtraction: r = a - b.
 * The caller guarantees a->size >= b->size and a >= b, so the final borrow is 0.
 */
int32_t USub(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b)
{
    uint32_t aLen = a->size;
    uint32_t bLen = b->size;
    // Make sure r can hold the full-width result.
    int32_t ret = BnExtend(r, aLen);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // Subtract the overlapping low limbs; BinSub returns the outgoing borrow.
    BN_UINT borrow = BinSub(r->data, a->data, b->data, bLen);
    // Propagate the borrow through a's remaining high limbs.
    for (uint32_t i = bLen; i < aLen; i++) {
        BN_UINT limb = a->data[i];
        r->data[i] = limb - borrow;
        borrow = (BN_UINT)(limb < borrow);
    }
    // Normalize: drop leading zero limbs.
    while (aLen > 0 && r->data[aLen - 1] == 0) {
        aLen--;
    }
    r->size = aLen;
    return CRYPT_SUCCESS;
}
/* Unsigned single-limb decrement: r = a - w. */
void UDec(BN_BigNum *r, const BN_BigNum *a, BN_UINT w)
{
    uint32_t size = a->size;
    // The caller must guarantee size > 1, so the final borrow is always 0 and
    // BinDec's return value can be safely ignored.
    (void)BinDec(r->data, a->data, size, w);
    r->size = BinFixSize(r->data, size);
}
/* Unsigned BigNum addition: r = a + b. */
int32_t UAdd(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b)
{
    const BN_BigNum *longer = a;
    const BN_BigNum *shorter = b;
    if (a->size < b->size) {
        longer = b;
        shorter = a;
    }
    uint32_t longLen = longer->size;
    uint32_t shortLen = shorter->size;
    // Reserve one extra limb for a possible final carry.
    int32_t ret = BnExtend(r, longLen + 1);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    r->size = longLen;
    // Add the overlapping low limbs; BinAdd returns the outgoing carry.
    BN_UINT carry = BinAdd(r->data, longer->data, shorter->data, shortLen);
    // Propagate the carry through the longer operand's remaining limbs.
    for (uint32_t i = shortLen; i < longLen; i++) {
        ADD_AB(carry, r->data[i], longer->data[i], carry);
    }
    if (carry != 0) {
        r->data[longLen] = carry;
        r->size = longLen + 1;
    }
    return CRYPT_SUCCESS;
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_ucal.c | C | unknown | 2,714 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef BN_UCAL_H
#define BN_UCAL_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "bn_basic.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Unsigned BigNum subtraction r = a - b. Caution: the caller must guarantee
 * valid inputs (a->size >= b->size) when invoking externally. */
int32_t USub(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b);
/* Unsigned single-limb decrement r = a - w. Caution: the caller must guarantee
 * valid inputs (a->size > 1) when invoking externally. */
void UDec(BN_BigNum *r, const BN_BigNum *a, BN_UINT w);
/* Unsigned BigNum addition r = a + b. Caution: the caller must guarantee
 * valid inputs when invoking externally. */
int32_t UAdd(BN_BigNum *r, const BN_BigNum *a, const BN_BigNum *b);
#ifdef __cplusplus
}
#endif
#endif /* HITLS_CRYPTO_BN */
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_ucal.h | C | unknown | 1,296 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include "securec.h"
#include "bsl_err_internal.h"
#include "crypt_errno.h"
#include "bn_basic.h"
#include "bn_bincal.h"
#include "bsl_sal.h"
/* Parse a big-endian byte stream into a BigNum. An all-zero (or empty) input
 * yields the value 0. */
int32_t BN_Bin2Bn(BN_BigNum *r, const uint8_t *bin, uint32_t binLen)
{
    if (r == NULL || bin == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    (void)BN_Zeroize(r);
    // Skip leading zero bytes of the big-endian input.
    uint32_t skip = 0;
    while (skip < binLen && bin[skip] == 0) {
        skip++;
    }
    if (skip == binLen) {
        // The whole input encodes the value 0.
        return CRYPT_SUCCESS;
    }
    const uint8_t *cursor = bin + skip;
    uint32_t remain = binLen - skip;
    // Number of limbs needed: ceil(remain / sizeof(BN_UINT)).
    uint32_t limbCnt = remain / sizeof(BN_UINT);
    if (remain % sizeof(BN_UINT) != 0) {
        limbCnt++;
    }
    int32_t ret = BnExtend(r, limbCnt);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // Fill limbs least-significant first, consuming bytes from the tail.
    uint32_t pos = 0;
    while (remain > 0) {
        uint32_t take = (remain >= sizeof(BN_UINT)) ? (uint32_t)sizeof(BN_UINT) : remain;
        BN_UINT limb = 0;
        for (uint32_t k = take; k > 0; k--) { // big-endian within the group
            limb = (limb << 8) | cursor[remain - k]; // 8 bits per byte
        }
        r->data[pos++] = limb;
        remain -= take;
    }
    r->size = BinFixSize(r->data, pos);
    return CRYPT_SUCCESS;
}
/* Serialize one limb into big-endian bytes: bin[0] receives the most
 * significant byte of num. */
static inline void Limb2Bin(uint8_t *bin, BN_UINT num)
{
    BN_UINT v = num;
    uint32_t i = (uint32_t)sizeof(BN_UINT);
    while (i > 0) {
        i--;
        bin[i] = (uint8_t)(v & 0xFF); // lowest byte goes to the highest index
        v >>= 8;                      // 8: number of bits in a byte
    }
}
/* Serialize a BigNum as a minimal-length big-endian byte string.
 * On success, *binLen is updated to the number of bytes written.
 * The value 0 is encoded as a single 0x00 byte. */
int32_t BN_Bn2Bin(const BN_BigNum *a, uint8_t *bin, uint32_t *binLen)
{
    if (a == NULL || bin == NULL || binLen == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    uint32_t needed = BN_Bytes(a);
    if (needed == 0) {
        needed = 1; // zero still produces one output byte
    }
    if (*binLen < needed) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_BUFF_LEN_NOT_ENOUGH);
        return CRYPT_BN_BUFF_LEN_NOT_ENOUGH;
    }
    int32_t ret = BN_Bn2BinFixZero(a, bin, needed);
    if (ret == CRYPT_SUCCESS) {
        *binLen = needed;
        return CRYPT_SUCCESS;
    }
    BSL_ERR_PUSH_ERROR(ret);
    return ret;
}
/* Normalize a BigNum by trimming leading zero limbs from its size.
 * A NULL input is silently ignored. */
void BN_FixSize(BN_BigNum *a)
{
    if (a != NULL) {
        a->size = BinFixSize(a->data, a->size);
    }
}
/* Public wrapper around the internal BnExtend: grow a's limb storage to at
 * least 'words' limbs. Returns CRYPT_SUCCESS or the BnExtend error code. */
int32_t BN_Extend(BN_BigNum *a, uint32_t words)
{
    return BnExtend(a, words);
}
// Serialize a BigNum big-endian into exactly binLen bytes, left-padding with
// zeros so the output is a fixed length.
int32_t BN_Bn2BinFixZero(const BN_BigNum *a, uint8_t *bin, uint32_t binLen)
{
    if (a == NULL || bin == NULL || binLen == 0) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    uint32_t bytes = BN_Bytes(a);
    if (binLen < bytes) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_BUFF_LEN_NOT_ENOUGH);
        return CRYPT_BN_BUFF_LEN_NOT_ENOUGH;
    }
    // Zero the leading pad region; the value is written after it.
    uint32_t fixLen = binLen - bytes;
    uint8_t *base = bin + fixLen;
    (void)memset_s(bin, binLen, 0, fixLen);
    if (bytes == 0) {
        // a == 0: the buffer is already all zeros.
        return CRYPT_SUCCESS;
    }
    uint32_t index = a->size - 1;
    uint32_t left = bytes % sizeof(BN_UINT); // partial (non-full) top limb, in bytes
    uint32_t offset = 0;
    // Emit the partial top limb byte by byte, most significant byte first.
    while (left != 0) {
        base[offset] = (uint8_t)((a->data[index] >> (8 * (left - 1))) & 0xFF); // 1byte = 8bit
        left--;
        offset++;
    }
    if (offset != 0) {
        // The top limb was partial and has been consumed; move to the next one.
        index--;
    }
    uint32_t num = bytes / sizeof(BN_UINT); // count of full limbs remaining
    // Cyclically serialize each remaining full limb (big-endian within each).
    for (uint32_t i = 0; i < num; i++) {
        Limb2Bin(base + offset, a->data[index]);
        index--; // may wrap after the last limb; the value is not used again
        offset += sizeof(BN_UINT);
    }
    return CRYPT_SUCCESS;
}
#if defined(HITLS_CRYPTO_CURVE_SM2_ASM) || \
((defined(HITLS_CRYPTO_CURVE_NISTP521) || defined(HITLS_CRYPTO_CURVE_NISTP384_ASM)) && \
defined(HITLS_CRYPTO_NIST_USE_ACCEL))
/* Convert BigNum to a 64-bit array in little-endian order. */
int32_t BN_Bn2U64Array(const BN_BigNum *a, uint64_t *array, uint32_t *len)
{
// Number of BN_UINTs that can be accommodated
const uint64_t capacity = ((uint64_t)(*len)) * (sizeof(uint64_t) / sizeof(BN_UINT));
if (a->size > capacity || *len == 0) {
BSL_ERR_PUSH_ERROR(CRYPT_BN_SPACE_NOT_ENOUGH);
return CRYPT_BN_SPACE_NOT_ENOUGH;
}
if (BN_IsZero(a)) {
*len = 1;
array[0] = 0;
return CRYPT_SUCCESS;
}
// BN_UINT is 64-bit or 32-bit. Select one during compilation.
if (sizeof(BN_UINT) == sizeof(uint64_t)) {
uint32_t i = 0;
for (; i < a->size; i++) {
array[i] = a->data[i];
}
*len = i;
}
if (sizeof(BN_UINT) == sizeof(uint32_t)) {
uint32_t i = 0;
uint32_t j = 0;
for (; i < a->size - 1; i += 2) { // processes 2 BN_UINT each time. Here, a->size >= 1
array[j] = a->data[i];
array[j] |= ((uint64_t)a->data[i + 1]) << 32; // in the upper 32 bits
j++;
}
// When a->size is an odd number, process the tail.
if (i < a->size) {
array[j++] = a->data[i];
}
*len = j;
}
return CRYPT_SUCCESS;
}
/* Build a BigNum from a little-endian array of 64-bit words. */
int32_t BN_U64Array2Bn(BN_BigNum *r, const uint64_t *array, uint32_t len)
{
    if (r == NULL || array == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    // Limb count needed to hold len 64-bit words.
    const uint64_t limbNeeded = ((uint64_t)len) * sizeof(uint64_t) / sizeof(BN_UINT);
    if (limbNeeded > UINT32_MAX) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_BITS_TOO_MAX);
        return CRYPT_BN_BITS_TOO_MAX;
    }
    int32_t ret = BnExtend(r, (uint32_t)limbNeeded);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    (void)BN_Zeroize(r);
    // BN_UINT is 64-bit or 32-bit; the compiler selects one branch at build time.
    if (sizeof(BN_UINT) == sizeof(uint64_t)) {
        // 64-bit limbs: words map one-to-one.
        for (uint32_t i = 0; i < limbNeeded; i++) {
            r->data[i] = array[i];
        }
    } else if (sizeof(BN_UINT) == sizeof(uint32_t)) {
        // 32-bit limbs: split every 64-bit word into a low and a high limb.
        for (uint64_t i = 0; i < len; i++) {
            r->data[i * 2] = (BN_UINT)array[i];
            r->data[i * 2 + 1] = (BN_UINT)(array[i] >> 32); // upper 32 bits
        }
    }
    // The cast is safe because limbNeeded <= UINT32_MAX was checked above.
    r->size = BinFixSize(r->data, (uint32_t)limbNeeded);
    return CRYPT_SUCCESS;
}
#endif
#if defined(HITLS_CRYPTO_CURVE_SM2_ASM) || (defined(HITLS_CRYPTO_CURVE_NISTP256_ASM) && \
defined(HITLS_CRYPTO_NIST_USE_ACCEL))
/* Copy a BigNum's limbs into a caller-supplied array of 'size' limbs,
 * zero-filling the unused tail. */
int32_t BN_BN2Array(const BN_BigNum *src, BN_UINT *dst, uint32_t size)
{
    if (size < src->size) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_BUFF_LEN_NOT_ENOUGH);
        return CRYPT_BN_BUFF_LEN_NOT_ENOUGH;
    }
    (void)memset_s(dst, size * sizeof(BN_UINT), 0, size * sizeof(BN_UINT));
    uint32_t i = 0;
    while (i < src->size) {
        dst[i] = src->data[i];
        i++;
    }
    return CRYPT_SUCCESS;
}
/* Load 'size' limbs from a raw array into a BigNum and normalize its size. */
int32_t BN_Array2BN(BN_BigNum *dst, const BN_UINT *src, const uint32_t size)
{
    int32_t ret = BnExtend(dst, size);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // No error code is checked: src has already been validated by the caller.
    (void)BN_Zeroize(dst);
    uint32_t i = 0;
    while (i < size) {
        dst->data[i] = src[i];
        i++;
    }
    dst->size = BinFixSize(dst->data, size);
    return CRYPT_SUCCESS;
}
#endif
#ifdef HITLS_CRYPTO_BN_STR_CONV
static const char HEX_MAP[] = "0123456789ABCDEF"; // Hexadecimal value corresponding to 0-15
#define BITS_OF_NUM 4
#define BITS_OF_BYTE 8
/* Return true if str is a valid digit for the requested base:
 * decimal digits always; hex letters (A-F, a-f) only when isHex is set. */
static bool IsXdigit(const char str, bool isHex)
{
    bool isDecDigit = (str >= '0') && (str <= '9');
    bool isHexLetter = ((str >= 'A') && (str <= 'F')) || ((str >= 'a') && (str <= 'f'));
    return isDecDigit || (isHex && isHexLetter);
}
/* Map a single hexadecimal character to its numeric value (0-15).
 * Unexpected characters decode to 0. */
static unsigned char StrToHex(char str)
{
    unsigned char value = 0x00; // unexpected characters are treated as 0
    if ((str >= '0') && (str <= '9')) {
        value = (unsigned char)(str - '0');
    } else if ((str >= 'A') && (str <= 'F')) {
        value = (unsigned char)(str - 'A' + 10); // 'A'..'F' map to 10..15
    } else if ((str >= 'a') && (str <= 'f')) {
        value = (unsigned char)(str - 'a' + 10); // 'a'..'f' map to 10..15
    }
    return value;
}
/* Validate a numeric string (decimal, or hexadecimal when isHex is set).
 * An optional leading '-' sets *negtive to 1. On success *outLen receives the
 * digit count (sign excluded). Every character after the sign must be a valid
 * digit. Returns CRYPT_BN_CONVERT_INPUT_INVALID on empty/oversized/bad input. */
static int32_t CheckInputStr(int32_t *outLen, const char *str, int32_t *negtive, bool isHex)
{
    const size_t strMax = BN_MAX_BITS / BITS_OF_NUM; // BigNum storage limit: 2^29 bits
    const char *inputStr = str;
    if (str[0] == '\0') {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_CONVERT_INPUT_INVALID);
        return CRYPT_BN_CONVERT_INPUT_INVALID;
    }
    if (str[0] == '-') {
        *negtive = 1;
        inputStr++;
    }
    // Keep the length in size_t: the previous int32_t assignment could wrap
    // negative for inputs longer than INT32_MAX and bypass the limit check.
    size_t initStrLen = strlen(inputStr);
    if (initStrLen == 0 || initStrLen > strMax) {
        BSL_ERR_PUSH_ERROR(CRYPT_BN_CONVERT_INPUT_INVALID);
        return CRYPT_BN_CONVERT_INPUT_INVALID;
    }
    for (size_t i = 0; i < initStrLen; i++) {
        // the entire content of the string must be valid digits
        if (!IsXdigit(inputStr[i], isHex)) {
            BSL_ERR_PUSH_ERROR(CRYPT_BN_CONVERT_INPUT_INVALID);
            return CRYPT_BN_CONVERT_INPUT_INVALID;
        }
    }
    *outLen = (int32_t)initStrLen; // fits: initStrLen <= strMax < INT32_MAX
    return CRYPT_SUCCESS;
}
/* Ensure *r can hold 'num' digits: create a fresh BigNum when *r is NULL,
 * otherwise extend it and reset its value to zero. */
static int32_t OutputCheck(BN_BigNum **r, int32_t num)
{
    uint32_t needBits = (uint32_t)num * BITS_OF_NUM;
    if (*r != NULL) {
        int32_t ret = BnExtend(*r, BITS_TO_BN_UNIT(needBits));
        if (ret != CRYPT_SUCCESS) {
            return ret;
        }
        (void)BN_Zeroize(*r);
        return CRYPT_SUCCESS;
    }
    *r = BN_Create(needBits);
    if (*r == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    return CRYPT_SUCCESS;
}
/* Parse a hexadecimal string (optionally '-' prefixed) into a BigNum.
 * *r may be NULL, in which case a new BigNum is created for the caller. */
int32_t BN_Hex2Bn(BN_BigNum **r, const char *str)
{
    int32_t ret;
    int32_t len;
    int32_t negtive = 0;
    if (r == NULL || str == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    const char *inputStr = str;
    // Reject empty/oversized/non-hex input; records sign and digit count.
    ret = CheckInputStr(&len, inputStr, &negtive, true);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // Create or extend *r so it can hold len hex digits (4 bits each).
    ret = OutputCheck(r, len);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    BN_UINT *p = (*r)->data;
    if (negtive != 0) {
        inputStr++; // skip the leading '-'
    }
    int32_t unitBytes;
    uint32_t tmpval = 0;
    uint32_t size = 0; // Record the number of limbs r will use.
    int32_t bytes = sizeof(BN_UINT);
    BN_UINT unitValue;
    // Consume the string from its tail, packing (2 * sizeof(BN_UINT)) hex
    // digits into each limb, least significant limb first.
    while (len > 0) {
        unitBytes = (bytes * 2 <= len) ? bytes * 2 : len; // Prevents the number of char left being less than bytes *2
        unitValue = 0;
        for (; unitBytes > 0; unitBytes--) {
            // CheckInputStr already guaranteed every character is a valid hex digit
            tmpval = StrToHex(inputStr[len - unitBytes]);
            unitValue = (unitValue << 4) | tmpval; // The upper bits are shifted by 4 bits (1 hex digit) each time.
        }
        p[size++] = unitValue;
        len -= bytes * 2; // Length of the character stream processed each time = Number of bytes x 2
    }
    (*r)->size = BinFixSize(p, size);
    if (!BN_IsZero(*r)) {
        (*r)->sign = negtive; // zero always stays non-negative
    }
    return CRYPT_SUCCESS;
}
/* Format a BigNum as an uppercase hexadecimal string (heap-allocated, caller
 * frees with BSL_SAL_FREE). Leading zero BYTES are suppressed, so the first
 * printed byte still yields two hex chars (e.g. 0x0ABC prints "0ABC").
 * Returns NULL on NULL input or allocation failure. */
char *BN_Bn2Hex(const BN_BigNum *a)
{
    uint32_t bytes = sizeof(BN_UINT);
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return NULL;
    }
    // output character stream = Number of bytes x 2 + minus sign + terminator
    char *ret = (char *)BSL_SAL_Malloc(a->size * bytes * 2 + 2);
    if (ret == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return NULL;
    }
    char *p = ret;
    if (BN_IsZero(a)) {
        *p++ = '0';
        *p++ = '\0';
        return ret;
    }
    if (a->sign) {
        *p++ = '-';
    }
    bool leadingZeros = true; // suppress bytes until the first nonzero one
    for (int32_t i = a->size - 1; i >= 0; i--) {
        // processes each limb in a group of 8 bits, most significant byte first
        for (int32_t j = (int32_t)(bytes * BITS_OF_BYTE - BITS_OF_BYTE); j >= 0; j -= 8) {
            uint32_t chars = (uint32_t)((a->data[i] >> (uint32_t)j) & 0xFF); // Take the last eight bits.
            if (leadingZeros && (chars == 0)) {
                continue;
            }
            *p++ = HEX_MAP[chars >> 4]; // Higher 4 bits
            *p++ = HEX_MAP[chars & 0x0F]; // Lower 4 bits
            leadingZeros = false;
        }
    }
    *p = '\0';
    return ret;
}
/* Accumulate a validated decimal string of 'num' digits into *r:
 * for each group of up to BN_DEC_LEN digits, *r = *r * BN_DEC_VAL + group.
 * NOTE(review): if num were 0 the loop would not run and the initial
 * CRYPT_INVALID_ARG would be returned — callers guarantee num > 0. */
static int32_t CalBnData(BN_BigNum **r, int32_t num, const char *inputStr)
{
    int32_t ret = CRYPT_INVALID_ARG;
    int32_t optTimes;
    int32_t len = num;
    const char *p = inputStr;
    BN_UINT unitValue = 0;
    /*
     * Processes decimal strings in groups of BN_DEC_LEN.
     * If the length of a string is not a multiple of BN_DEC_LEN, then in the first round of string processing,
       handle according to the actual length of less than BN_DEC_LEN
     */
    optTimes = (len % BN_DEC_LEN == 0) ? 0 : (BN_DEC_LEN - len % BN_DEC_LEN);
    while (len > 0) {
        // keep the upper limit of each round of traversal as BN_DEC_LEN
        for (; optTimes < BN_DEC_LEN; optTimes++, len--) {
            unitValue *= 10; // A decimal number is multiplied by 10 and then added.
            unitValue += *p - '0';
            p++;
        }
        // Shift the accumulator left by one decimal group: *r *= 10^BN_DEC_LEN.
        ret = BN_MulLimb(*r, *r, BN_DEC_VAL);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            goto ERR;
        }
        ret = BN_AddLimb(*r, *r, unitValue);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            goto ERR;
        }
        unitValue = 0;
        optTimes = 0;
    }
ERR:
    return ret;
}
/* Parse a decimal string (optionally '-' prefixed) into a BigNum.
 * *r may be NULL, in which case a new BigNum is created for the caller. */
int32_t BN_Dec2Bn(BN_BigNum **r, const char *str)
{
    if (r == NULL || str == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    int32_t digitCnt = 0;
    int32_t isNeg = 0;
    const char *cursor = str;
    // Reject empty/oversized/non-decimal input; records sign and digit count.
    int32_t ret = CheckInputStr(&digitCnt, cursor, &isNeg, false);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    // Create or extend *r so it can hold digitCnt decimal digits.
    ret = OutputCheck(r, digitCnt);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    if (isNeg != 0) {
        cursor++; // skip the leading '-'
    }
    ret = CalBnData(r, digitCnt, cursor);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    if (!BN_IsZero(*r)) {
        (*r)->sign = isNeg; // zero always stays non-negative
    }
    return ret;
}
/* Decompose |a| into base-BN_DEC_VAL digits, stored least significant first in
 * bnInit (capacity unitNum). *step receives the index of the most significant
 * group. Works on a duplicate so 'a' is left untouched.
 * NOTE(review): if a were zero, index stays 0 and *step would wrap to
 * UINT32_MAX — the only visible caller (BN_Bn2Dec) rejects zero beforehand. */
static int32_t CalDecStr(const BN_BigNum *a, BN_UINT *bnInit, uint32_t unitNum, uint32_t *step)
{
    int32_t ret = CRYPT_INVALID_ARG;
    BN_UINT *valNow = bnInit;
    uint32_t index = 0;
    BN_BigNum *bnDup = BN_Dup(a);
    if (bnDup == NULL) {
        ret = CRYPT_MEM_ALLOC_FAIL;
        goto ERR;
    }
    // Repeated division: each pass strips one base-BN_DEC_VAL digit.
    while (!BN_IsZero(bnDup)) {
        BN_UINT rem;
        // index records the amount of BN_UINT offset, cannot exceed the maximum value unitNum
        if (index == unitNum) {
            ret = CRYPT_SECUREC_FAIL;
            goto ERR;
        }
        ret = BN_DivLimb(bnDup, &rem, bnDup, BN_DEC_VAL);
        if (ret != CRYPT_SUCCESS) {
            goto ERR;
        }
        valNow[index++] = rem;
    }
    (*step) = index - 1;
ERR:
    BN_Destroy(bnDup);
    return ret;
}
/* Render one base-BN_DEC_VAL group as decimal text into 'output'.
 * When isNeedPad is set, the group is left-padded with '0' to BN_DEC_LEN
 * characters (used for all groups after the most significant one).
 * *restLen is the remaining buffer capacity and is decremented by the number
 * of characters written, which is also reported through *printNum. */
static int32_t NumToStr(char *output, uint32_t *restLen, BN_UINT valNow, bool isNeedPad, uint32_t *printNum)
{
    BN_UINT num = valNow;
    char *target = output;
    uint32_t len = 0;
    do {
        if (*restLen < len + 1) {
            BSL_ERR_PUSH_ERROR(CRYPT_BN_BUFF_LEN_NOT_ENOUGH);
            return CRYPT_BN_BUFF_LEN_NOT_ENOUGH;
        }
        // Emit the least significant decimal digit; use the '0' character
        // literal instead of the magic ASCII code 48.
        target[len++] = (char)('0' + (num % 10));
        num /= 10; // drop the digit just emitted
    } while (num != 0);
    if (isNeedPad) {
        if (*restLen < BN_DEC_LEN) {
            BSL_ERR_PUSH_ERROR(CRYPT_BN_BUFF_LEN_NOT_ENOUGH);
            return CRYPT_BN_BUFF_LEN_NOT_ENOUGH;
        }
        while (len < BN_DEC_LEN) {
            target[len++] = '0';
        }
    }
    // Digits were produced least-significant first; reverse them in place by
    // swapping symmetric pairs (len / 2 swaps).
    for (uint32_t j = 0; j < len / 2; j++) {
        char t = target[j];
        target[j] = target[len - 1 - j];
        target[len - 1 - j] = t;
    }
    *restLen -= len;
    *printNum = len;
    return CRYPT_SUCCESS;
}
/* Concatenate the base-BN_DEC_VAL groups (most significant first) into a
 * NUL-terminated decimal string. Only the leading group is unpadded. */
static int32_t FmtDecOutput(char *output, uint32_t outLen, const BN_UINT *bnInit, uint32_t steps)
{
    uint32_t written = 0;
    char *dst = output;
    uint32_t idx = steps;
    uint32_t remain = outLen - 1; // keep room for the terminator
    // Most significant group: printed without zero padding.
    int32_t ret = NumToStr(dst, &remain, bnInit[idx], false, &written);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    dst += written;
    // Remaining groups: each zero-padded to BN_DEC_LEN digits.
    while (idx != 0) {
        idx--;
        ret = NumToStr(dst, &remain, bnInit[idx], true, &written);
        if (ret != CRYPT_SUCCESS) {
            return ret;
        }
        dst += written;
    }
    *dst = '\0';
    return CRYPT_SUCCESS;
}
/* Format a BigNum as a decimal string (heap-allocated; caller frees with
 * BSL_SAL_FREE). Returns NULL on NULL input, allocation failure, or any
 * internal conversion error. */
char *BN_Bn2Dec(const BN_BigNum *a)
{
    if (a == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return NULL;
    }
    int32_t ret;
    char *p = NULL;
    uint32_t steps = 0;
    /*
     * Estimate the maximum length of a decimal BigNum
     * x <= 10 ^ y < 2 ^ (bit + 1)
     * y < lg_(2) ( 2 ^ (bit + 1))
     * y < (bit + 1) * lg2    --  (lg_2 = 0.30102999566...)
     * y < (bit + 1) * 0.303
     * y < 3 * bit * 0.001 + 3 * bit * 0.100 + 1
     */
    uint32_t numLen = (BN_Bits(a) * 3) / 10 + (BN_Bits(a) * 3) / 1000 + 1;
    uint32_t outLen = numLen + 3; // Add the sign, end symbol, and buffer space.
    uint32_t unitNum = (numLen / BN_DEC_LEN) + 1; // base-BN_DEC_VAL digit groups
    char *result = BSL_SAL_Malloc(outLen);
    BN_UINT *bnInit = (BN_UINT *)BSL_SAL_Malloc(unitNum * sizeof(BN_UINT));
    if (result == NULL || bnInit == NULL) {
        ret = CRYPT_MEM_ALLOC_FAIL;
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        goto ERR;
    }
    p = result;
    if (BN_IsZero(a)) {
        // Zero is a special case: CalDecStr must not see it (see its note).
        *p++ = '0';
        *p++ = '\0';
        ret = CRYPT_SUCCESS;
        goto ERR;
    }
    if (a->sign) {
        *p++ = '-';
        outLen--; // the sign consumed one byte of the output buffer
    }
    // Split |a| into decimal groups, then render them into the buffer.
    ret = CalDecStr(a, bnInit, unitNum, &steps);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
    ret = FmtDecOutput(p, outLen, bnInit, steps);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        goto ERR;
    }
ERR:
    BSL_SAL_FREE(bnInit);
    if (ret == CRYPT_SUCCESS) {
        return result; // ownership transfers to the caller
    }
    BSL_SAL_FREE(result);
    return NULL;
}
#endif /* HITLS_CRYPTO_BN_STR_CONV */
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/bn_utils.c | C | unknown | 19,706 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include "bn_bincal.h"
/* r = a + b where r, a and b each hold n limbs. Returns the final carry (0/1).
 * The carry is threaded limb-to-limb through the ADD_ABC macro. */
BN_UINT BinAdd(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n)
{
    BN_UINT carry = 0;
    uint32_t nn = n;
    const BN_UINT *aa = a;
    const BN_UINT *bb = b;
    BN_UINT *rr = r;
#ifndef HITLS_CRYPTO_BN_SMALL_MEM
    // Unrolled fast path: four limbs per iteration.
    while (nn >= 4) { /* Process 4 groups in batches. */
        ADD_ABC(carry, rr[0], aa[0], bb[0], carry); /* offset 0 */
        ADD_ABC(carry, rr[1], aa[1], bb[1], carry); /* offset 1 */
        ADD_ABC(carry, rr[2], aa[2], bb[2], carry); /* offset 2 */
        ADD_ABC(carry, rr[3], aa[3], bb[3], carry); /* offset 3 */
        rr += 4; /* a group of 4 */
        aa += 4; /* a group of 4 */
        bb += 4; /* a group of 4 */
        nn -= 4; /* a group of 4 */
    }
#endif
    // Tail (or whole input when the unrolled path is compiled out).
    uint32_t i = 0;
    for (; i < nn; i++) {
        ADD_ABC(carry, rr[i], aa[i], bb[i], carry);
    }
    return carry;
}
/* r = a - b where r, a and b each hold n limbs. Returns the final borrow (0/1).
 * The borrow is threaded limb-to-limb through the SUB_ABC macro. */
BN_UINT BinSub(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n)
{
    BN_UINT borrow = 0;
    uint32_t nn = n;
    const BN_UINT *aa = a;
    const BN_UINT *bb = b;
    BN_UINT *rr = r;
#ifndef HITLS_CRYPTO_BN_SMALL_MEM
    // Unrolled fast path: four limbs per iteration.
    while (nn >= 4) { /* Process 4 groups in batches. */
        SUB_ABC(borrow, rr[0], aa[0], bb[0], borrow); /* offset 0 */
        SUB_ABC(borrow, rr[1], aa[1], bb[1], borrow); /* offset 1 */
        SUB_ABC(borrow, rr[2], aa[2], bb[2], borrow); /* offset 2 */
        SUB_ABC(borrow, rr[3], aa[3], bb[3], borrow); /* offset 3 */
        rr += 4; /* a group of 4 */
        aa += 4; /* a group of 4 */
        bb += 4; /* a group of 4 */
        nn -= 4; /* a group of 4 */
    }
#endif
    // Tail (or whole input when the unrolled path is compiled out).
    uint32_t i = 0;
    for (; i < nn; i++) {
        SUB_ABC(borrow, rr[i], aa[i], bb[i], borrow);
    }
    return borrow;
}
/* Count leading zero bits of x (returns BN_UNIT_BITS when x == 0).
 * Binary search: each pass halves the window; at the end tmp is either 0
 * (x was 0) or 1 (the isolated top set bit), and 'bits' has been reduced to
 * (leading zeros + tmp), so bits - tmp is the answer. */
uint32_t GetZeroBitsUint(BN_UINT x)
{
    BN_UINT iter;
    BN_UINT tmp = x;
    uint32_t bits = BN_UNIT_BITS;
    uint32_t base = BN_UNIT_BITS >> 1;
    do {
        iter = tmp >> base;
        if (iter != 0) {
            // The top set bit lives in the upper half: keep it, shrink 'bits'.
            tmp = iter;
            bits -= base;
        }
        base = base >> 1;
    } while (base != 0);
    return (uint32_t)(bits - tmp);
}
/* Multiply-and-subtract: r[0..aSize) -= a[0..aSize) * m.
 * Returns the outgoing borrow (the amount still owed beyond r's top limb);
 * used e.g. as the quotient-correction step of long division. */
BN_UINT BinSubMul(BN_UINT *r, const BN_UINT *a, BN_UINT aSize, BN_UINT m)
{
    BN_UINT borrow = 0;
    uint32_t i;
    for (i = 0; i < aSize; i++) {
        BN_UINT ah, al;
        MUL_AB(ah, al, a[i], m);          // (ah:al) = a[i] * m
        SUB_ABC(borrow, r[i], r[i], al, borrow); // subtract low word + previous borrow
        borrow += ah;                     // high word joins the running borrow
    }
    return borrow;
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/noasm_bn_bincal.c | C | unknown | 3,341 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include <stdbool.h>
#include "bn_bincal.h"
/* Montgomery squaring: r = reduce(r * r). This no-asm build simply delegates
 * to the shared C core implementation. */
int32_t MontSqrBin(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    return MontSqrBinCore(r, mont, opt, consttime);
}
/* Montgomery multiplication: r = reduce(a * b). Delegates to the C core. */
int32_t MontMulBin(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, BN_Mont *mont,
    BN_Optimizer *opt, bool consttime)
{
    return MontMulBinCore(r, a, b, mont, opt, consttime);
}
/* Convert r into the Montgomery domain. Delegates to the C core. */
int32_t MontEncBin(BN_UINT *r, BN_Mont *mont, BN_Optimizer *opt, bool consttime)
{
    return MontEncBinCore(r, mont, opt, consttime);
}
/* Montgomery reduction of x modulo m into r. The 'one' parameter is unused
 * here; it is kept to match the prototype shared with other implementations. */
void Reduce(BN_UINT *r, BN_UINT *x, const BN_UINT *one, const BN_UINT *m, uint32_t mSize, BN_UINT m0)
{
    (void)one;
    ReduceCore(r, x, m, mSize, m0);
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/noasm_bn_mont.c | C | unknown | 1,325 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_BN
#include <stdint.h>
#include "bn_bincal.h"
#ifndef HITLS_SIXTY_FOUR_BITS
#error Bn binical x8664 optimizer must open BN-64.
#endif
// r = a + b over n limbs, returns the final carry (0 or 1).
// x86-64 inline asm: processes 4 limbs per iteration with an adcq chain, then
// handles the n % 4 remainder one limb at a time; sbbq/& 1 extracts the carry.
BN_UINT BinAdd(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n)
{
    if (n == 0) {
        return 0;
    }
    BN_UINT ret = n;
    asm volatile (
        ".p2align 4                        \n"
        "     mov %0, %%rcx                \n"
        "     and $3, %%rcx                \n" // will clear CF
        "     shr $2, %0                   \n"
        "     clc                          \n"
        "     jz aone                      \n" // n / 4 > = 0 , goto step 4
        "4:   movq 0(%2), %%r8             \n"
        "     movq 8(%2), %%r9             \n"
        "     movq 16(%2), %%r10           \n"
        "     movq 24(%2), %%r11           \n"
        "     adcq 0(%3), %%r8             \n"
        "     adcq 8(%3), %%r9             \n"
        "     adcq 16(%3), %%r10           \n"
        "     adcq 24(%3), %%r11           \n"
        "     movq %%r8, 0(%1)             \n"
        "     movq %%r9, 8(%1)             \n"
        "     movq %%r10, 16(%1)           \n"
        "     movq %%r11, 24(%1)           \n"
        "     lea 32(%1), %1               \n"
        "     lea 32(%2), %2               \n"
        "     lea 32(%3), %3               \n"
        "     dec %0                       \n"
        "     jnz 4b                       \n"
        "aone: jrcxz eadd                  \n" // n % 4 == 0, goto end
        "1:   movq (%2,%0,8), %%r8         \n"
        "     adcq (%3,%0,8), %%r8         \n"
        "     movq %%r8, (%1,%0,8)         \n"
        "     inc %0                       \n"
        "     dec %%rcx                    \n"
        "     jnz 1b                       \n"
        "eadd: sbbq %0, %0                 \n"
        :"+&r" (ret)
        :"r"(r), "r"(a), "r"(b)
        :"r8", "r9", "r10", "r11", "rcx", "cc", "memory");
    return ret & 1;
}
// r = a - b over n limbs, returns the final borrow (0 or 1).
// x86-64 inline asm: mirrors BinAdd but uses an sbbq chain for the borrow.
BN_UINT BinSub(BN_UINT *r, const BN_UINT *a, const BN_UINT *b, uint32_t n)
{
    if (n == 0) {
        return 0;
    }
    BN_UINT res = n;
    asm volatile (
        ".p2align 4                        \n"
        "     mov %0, %%rcx                \n"
        "     and $3, %%rcx                \n"
        "     shr $2, %0                   \n"
        "     clc                          \n"
        "     jz sone                      \n" // n / 4 > = 0 , goto step 4
        "4:   movq 0(%2), %%r8             \n"
        "     movq 8(%2), %%r9             \n"
        "     movq 16(%2), %%r10           \n"
        "     movq 24(%2), %%r11           \n"
        "     sbbq 0(%3), %%r8             \n"
        "     sbbq 8(%3), %%r9             \n"
        "     sbbq 16(%3), %%r10           \n"
        "     sbbq 24(%3), %%r11           \n"
        "     movq %%r8, 0(%1)             \n"
        "     movq %%r9, 8(%1)             \n"
        "     movq %%r10, 16(%1)           \n"
        "     movq %%r11, 24(%1)           \n"
        "     lea 32(%1), %1               \n"
        "     lea 32(%2), %2               \n"
        "     lea 32(%3), %3               \n"
        "     dec %0                       \n"
        "     jnz 4b                       \n"
        "sone: jrcxz esub                  \n" // n % 4 == 0, goto end
        "1:   movq (%2,%0,8), %%r8         \n"
        "     sbbq (%3,%0,8), %%r8         \n"
        "     movq %%r8, (%1,%0,8)         \n"
        "     inc %0                       \n"
        "     dec %%rcx                    \n"
        "     jnz 1b                       \n"
        "esub: sbbq %0, %0                 \n"
        :"+&r" (res)
        :"r"(r), "r"(a), "r"(b)
        :"r8", "r9", "r10", "r11", "rcx", "cc", "memory");
    return res & 1;
}
// Multiply-and-subtract: r[0..aSize) -= a[0..aSize) * m; returns the borrow.
// x86-64 inline asm: one mulq per limb, borrow threaded through rdx.
BN_UINT BinSubMul(BN_UINT *r, const BN_UINT *a, BN_UINT aSize, BN_UINT m)
{
    BN_UINT borrow = 0;
    BN_UINT i = 0;
    asm volatile (
        ".p2align 4                        \n"
        "endy: movq %5, %%rax              \n" // rax = m
        "     mulq (%4,%1,8)               \n" // rax -> al, rdx -> ah
        "     addq %0, %%rax               \n" // rax = al + borrow
        "     adcq $0, %%rdx               \n" // if has carry, rdx++
        "     subq %%rax, (%3,%1,8)        \n" // r[i] = r[i] - (al + borrow)
        "     adcq $0, %%rdx               \n" // if has carry, borrow++
        "     movq %%rdx, %0               \n"
        "     inc %1                       \n"
        "     dec %2                       \n"
        "     jnz endy                     \n"
        :"+&r" (borrow), "+r"(i), "+r"(aSize)
        :"r"(r), "r"(a), "r"(m)
        :"rax", "rdx", "cc", "memory");
    return borrow;
}
/* Count leading zero bits of x (returns BN_UNIT_BITS when x == 0).
 * Binary search: each pass halves the window; at the end tmp is either 0
 * (x was 0) or 1 (the isolated top set bit), so bits - tmp is the answer. */
uint32_t GetZeroBitsUint(BN_UINT x)
{
    BN_UINT iter;
    BN_UINT tmp = x;
    uint32_t bits = BN_UNIT_BITS;
    uint32_t base = BN_UNIT_BITS >> 1;
    do {
        iter = tmp >> base;
        if (iter != 0) {
            // The top set bit lives in the upper half: keep it, shrink 'bits'.
            tmp = iter;
            bits -= base;
        }
        base = base >> 1;
    } while (base != 0);
    return bits - tmp;
}
#endif /* HITLS_CRYPTO_BN */
| 2302_82127028/openHiTLS-examples_1508 | crypto/bn/src/x8664_bn_bincal.c | C | unknown | 6,809 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CRYPT_CHACHA20_H
#define CRYPT_CHACHA20_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
#include <stdint.h>
#include "crypt_types.h"
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
#define CHACHA20_STATESIZE 16 // 16 x 32-bit state words (RFC 7539)
#define CHACHA20_STATEBYTES (CHACHA20_STATESIZE * sizeof(uint32_t))
#define CHACHA20_KEYLEN 32 // 256-bit key, in bytes
#define CHACHA20_NONCELEN 12 // 96-bit nonce, in bytes
/* ChaCha20 stream-cipher context. */
typedef struct {
    uint32_t state[CHACHA20_STATESIZE]; // state RFC 7539
    union {
        uint32_t c[CHACHA20_STATESIZE];
        uint8_t u[CHACHA20_STATEBYTES];
    } last; // save the last (partially consumed) keystream block
    uint32_t lastLen; // remaining length of the last data in bytes
    uint8_t set; // indicates whether the key and nonce are set
} CRYPT_CHACHA20_Ctx;
/* Install a CHACHA20_KEYLEN-byte key into the context. */
int32_t CRYPT_CHACHA20_SetKey(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *key, uint32_t keyLen);
/* Encrypt/decrypt len bytes from in to out (XOR with the keystream). */
int32_t CRYPT_CHACHA20_Update(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
/* Control interface (e.g. set nonce/counter); see implementation for opts. */
int32_t CRYPT_CHACHA20_Ctrl(CRYPT_CHACHA20_Ctx *ctx, int32_t opt, void *val, uint32_t len);
/* Wipe all key material and reset the context. */
void CRYPT_CHACHA20_Clean(CRYPT_CHACHA20_Ctx *ctx);
#ifdef __cplusplus
}
#endif // __cplusplus
#endif // HITLS_CRYPTO_CHACHA20
#endif // CRYPT_CHACHA20_H
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/include/crypt_chacha20.h | C | unknown | 1,710 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
.text
.macro CHA256_SET_VDATA
// Initialize three 64-byte NEON block states (rows a/b/c/d of the ChaCha
// matrix) from the sigma constant, the two key halves, and three pre-built
// counter rows (VREG52/53/54) prepared by the caller.
    mov VREG01.16b, VSIGMA.16b
    mov VREG11.16b, VSIGMA.16b
    mov VREG21.16b, VSIGMA.16b
    mov VREG02.16b, VKEY01.16b
    mov VREG12.16b, VKEY01.16b
    mov VREG22.16b, VKEY01.16b
    mov VREG03.16b, VKEY02.16b
    mov VREG13.16b, VKEY02.16b
    mov VREG23.16b, VKEY02.16b
    mov VREG04.16b, VREG52.16b // counter row 1
    mov VREG14.16b, VREG53.16b // counter row 2
    mov VREG24.16b, VREG54.16b // counter row 3
.endm
.macro CHA256_ROUND_A
// Column-round half of a ChaCha double round, computed simultaneously on one
// scalar 64-byte block (WINPUT0..15, quarter-rounds on columns 0/4/8/12 etc.)
// and three NEON blocks (VREGx1..x4). Scalar and vector instructions are
// interleaved to exploit dual issue; the ordering is deliberate — do not
// reorder. Vector rotates use rev32 (16), ushr+sli (12, 8, 7).
    add WINPUT0, WINPUT0, WINPUT4 // A+B
    add VREG01.4s, VREG01.4s, VREG02.4s
    add WINPUT1, WINPUT1, WINPUT5 // A+B
    add VREG11.4s, VREG11.4s, VREG12.4s
    add WINPUT2, WINPUT2, WINPUT6 // A+B
    add VREG21.4s, VREG21.4s, VREG22.4s
    add WINPUT3, WINPUT3, WINPUT7 // A+B
    eor VREG04.16b, VREG04.16b, VREG01.16b
    eor WINPUT12, WINPUT12, WINPUT0 // D^A
    eor VREG14.16b, VREG14.16b, VREG11.16b
    eor WINPUT13, WINPUT13, WINPUT1 // D^A
    eor VREG24.16b, VREG24.16b, VREG21.16b
    eor WINPUT14, WINPUT14, WINPUT2 // D^A
    rev32 VREG04.8h, VREG04.8h
    eor WINPUT15, WINPUT15, WINPUT3 // D^A
    rev32 VREG14.8h, VREG14.8h
    ror WINPUT12, WINPUT12, #16 // D>>>16
    rev32 VREG24.8h, VREG24.8h
    ror WINPUT13, WINPUT13, #16 // D>>>16
    add VREG03.4s, VREG03.4s, VREG04.4s
    ror WINPUT14, WINPUT14, #16 // D>>>16
    add VREG13.4s, VREG13.4s, VREG14.4s
    ror WINPUT15, WINPUT15, #16 // D>>>16
    add VREG23.4s, VREG23.4s, VREG24.4s
    add WINPUT8, WINPUT8, WINPUT12 // C+D
    eor VREG41.16b, VREG03.16b, VREG02.16b
    add WINPUT9, WINPUT9, WINPUT13 // C+D
    eor VREG42.16b, VREG13.16b, VREG12.16b
    add WINPUT10, WINPUT10, WINPUT14 // C+D
    eor VREG43.16b, VREG23.16b, VREG22.16b
    add WINPUT11, WINPUT11, WINPUT15 // C+D
    ushr VREG02.4s, VREG41.4s, #20
    eor WINPUT4, WINPUT4, WINPUT8 // B^C
    ushr VREG12.4s, VREG42.4s, #20
    eor WINPUT5, WINPUT5, WINPUT9 // B^C
    ushr VREG22.4s, VREG43.4s, #20
    eor WINPUT6, WINPUT6, WINPUT10 // B^C
    sli VREG02.4s, VREG41.4s, #12
    eor WINPUT7, WINPUT7, WINPUT11 // B^C
    sli VREG12.4s, VREG42.4s, #12
    ror WINPUT4, WINPUT4, #20 // B>>>20
    sli VREG22.4s, VREG43.4s, #12
    ror WINPUT5, WINPUT5, #20 // B>>>20
    add VREG01.4s, VREG01.4s, VREG02.4s
    ror WINPUT6, WINPUT6, #20 // B>>>20
    add VREG11.4s, VREG11.4s, VREG12.4s
    ror WINPUT7, WINPUT7, #20 // B>>>20
    add VREG21.4s, VREG21.4s, VREG22.4s
    add WINPUT0, WINPUT0, WINPUT4 // A+B
    eor VREG41.16b, VREG04.16b, VREG01.16b
    add WINPUT1, WINPUT1, WINPUT5 // A+B
    eor VREG42.16b, VREG14.16b, VREG11.16b
    add WINPUT2, WINPUT2, WINPUT6 // A+B
    eor VREG43.16b, VREG24.16b, VREG21.16b
    add WINPUT3, WINPUT3, WINPUT7 // A+B
    ushr VREG04.4s, VREG41.4s, #24
    eor WINPUT12, WINPUT12, WINPUT0 // D^A
    ushr VREG14.4s, VREG42.4s, #24
    eor WINPUT13, WINPUT13, WINPUT1 // D^A
    ushr VREG24.4s, VREG43.4s, #24
    eor WINPUT14, WINPUT14, WINPUT2 // D^A
    sli VREG04.4s, VREG41.4s, #8
    eor WINPUT15, WINPUT15, WINPUT3 // D^A
    sli VREG14.4s, VREG42.4s, #8
    ror WINPUT12, WINPUT12, #24 // D>>>24
    sli VREG24.4s, VREG43.4s, #8
    ror WINPUT13, WINPUT13, #24 // D>>>24
    add VREG03.4s, VREG03.4s, VREG04.4s
    ror WINPUT14, WINPUT14, #24 // D>>>24
    add VREG13.4s, VREG13.4s, VREG14.4s
    ror WINPUT15, WINPUT15, #24 // D>>>24
    add VREG23.4s, VREG23.4s, VREG24.4s
    add WINPUT8, WINPUT8, WINPUT12 // C+D
    eor VREG41.16b, VREG03.16b, VREG02.16b
    add WINPUT9, WINPUT9, WINPUT13 // C+D
    eor VREG42.16b, VREG13.16b, VREG12.16b
    add WINPUT10, WINPUT10, WINPUT14 // C+D
    eor VREG43.16b, VREG23.16b, VREG22.16b
    add WINPUT11, WINPUT11, WINPUT15 // C+D
    ushr VREG02.4s, VREG41.4s, #25
    eor WINPUT4, WINPUT4, WINPUT8 // B^C
    ushr VREG12.4s, VREG42.4s, #25
    eor WINPUT5, WINPUT5, WINPUT9 // B^C
    ushr VREG22.4s, VREG43.4s, #25
    eor WINPUT6, WINPUT6, WINPUT10 // B^C
    sli VREG02.4s, VREG41.4s, #7
    eor WINPUT7, WINPUT7, WINPUT11 // B^C
    sli VREG12.4s, VREG42.4s, #7
    ror WINPUT4, WINPUT4, #25 // B>>>25
    sli VREG22.4s, VREG43.4s, #7
    ror WINPUT5, WINPUT5, #25 // B>>>25
    ext VREG03.16b, VREG03.16b, VREG03.16b, #8
    ror WINPUT6, WINPUT6, #25 // B>>>25
    ext VREG13.16b, VREG13.16b, VREG13.16b, #8
    ror WINPUT7, WINPUT7, #25 // B>>>25
    ext VREG23.16b, VREG23.16b, VREG23.16b, #8
.endm
.macro CHA256_ROUND_B
// Diagonal-round half of a ChaCha double round (partner of CHA256_ROUND_A),
// again interleaving one scalar block with three NEON blocks. The scalar
// quarter-rounds use the diagonal pairings (0/5/10/15, 1/6/11/12, ...); the
// NEON rows were rotated into diagonal position by ext between A and B.
// Instruction interleaving is deliberate — do not reorder.
    add WINPUT0, WINPUT0, WINPUT5 // A+B
    add VREG01.4s, VREG01.4s, VREG02.4s
    add WINPUT1, WINPUT1, WINPUT6 // A+B
    add VREG11.4s, VREG11.4s, VREG12.4s
    add WINPUT2, WINPUT2, WINPUT7 // A+B
    add VREG21.4s, VREG21.4s, VREG22.4s
    add WINPUT3, WINPUT3, WINPUT4 // A+B
    eor VREG04.16b, VREG04.16b, VREG01.16b
    eor WINPUT15, WINPUT15, WINPUT0 // D^A
    eor VREG14.16b, VREG14.16b, VREG11.16b
    eor WINPUT12, WINPUT12, WINPUT1 // D^A
    eor VREG24.16b, VREG24.16b, VREG21.16b
    eor WINPUT13, WINPUT13, WINPUT2 // D^A
    rev32 VREG04.8h, VREG04.8h
    eor WINPUT14, WINPUT14, WINPUT3 // D^A
    rev32 VREG14.8h, VREG14.8h
    ror WINPUT12, WINPUT12, #16 // D>>>16
    rev32 VREG24.8h, VREG24.8h
    ror WINPUT13, WINPUT13, #16 // D>>>16
    add VREG03.4s, VREG03.4s, VREG04.4s
    ror WINPUT14, WINPUT14, #16 // D>>>16
    add VREG13.4s, VREG13.4s, VREG14.4s
    ror WINPUT15, WINPUT15, #16 // D>>>16
    add VREG23.4s, VREG23.4s, VREG24.4s
    add WINPUT10, WINPUT10, WINPUT15 // C+D
    eor VREG41.16b, VREG03.16b, VREG02.16b
    add WINPUT11, WINPUT11, WINPUT12 // C+D
    eor VREG42.16b, VREG13.16b, VREG12.16b
    add WINPUT8, WINPUT8, WINPUT13 // C+D
    eor VREG43.16b, VREG23.16b, VREG22.16b
    add WINPUT9, WINPUT9, WINPUT14 // C+D
    ushr VREG02.4s, VREG41.4s, #20
    eor WINPUT5, WINPUT5, WINPUT10 // B^C
    ushr VREG12.4s, VREG42.4s, #20
    eor WINPUT6, WINPUT6, WINPUT11 // B^C
    ushr VREG22.4s, VREG43.4s, #20
    eor WINPUT7, WINPUT7, WINPUT8 // B^C
    sli VREG02.4s, VREG41.4s, #12
    eor WINPUT4, WINPUT4, WINPUT9 // B^C
    sli VREG12.4s, VREG42.4s, #12
    ror WINPUT4, WINPUT4, #20 // B>>>20
    sli VREG22.4s, VREG43.4s, #12
    ror WINPUT5, WINPUT5, #20 // B>>>20
    add VREG01.4s, VREG01.4s, VREG02.4s
    ror WINPUT6, WINPUT6, #20 // B>>>20
    add VREG11.4s, VREG11.4s, VREG12.4s
    ror WINPUT7, WINPUT7, #20 // B>>>20
    add VREG21.4s, VREG21.4s, VREG22.4s
    add WINPUT0, WINPUT0, WINPUT5 // A+B
    eor VREG41.16b, VREG04.16b, VREG01.16b
    add WINPUT1, WINPUT1, WINPUT6 // A+B
    eor VREG42.16b, VREG14.16b, VREG11.16b
    add WINPUT2, WINPUT2, WINPUT7 // A+B
    eor VREG43.16b, VREG24.16b, VREG21.16b
    add WINPUT3, WINPUT3, WINPUT4 // A+B
    ushr VREG04.4s, VREG41.4s, #24
    eor WINPUT15, WINPUT15, WINPUT0 // D^A
    ushr VREG14.4s, VREG42.4s, #24
    eor WINPUT12, WINPUT12, WINPUT1 // D^A
    ushr VREG24.4s, VREG43.4s, #24
    eor WINPUT13, WINPUT13, WINPUT2 // D^A
    sli VREG04.4s, VREG41.4s, #8
    eor WINPUT14, WINPUT14, WINPUT3 // D^A
    sli VREG14.4s, VREG42.4s, #8
    ror WINPUT12, WINPUT12, #24 // D>>>24
    sli VREG24.4s, VREG43.4s, #8
    ror WINPUT13, WINPUT13, #24 // D>>>24
    add VREG03.4s, VREG03.4s, VREG04.4s
    ror WINPUT14, WINPUT14, #24 // D>>>24
    add VREG13.4s, VREG13.4s, VREG14.4s
    ror WINPUT15, WINPUT15, #24 // D>>>24
    add VREG23.4s, VREG23.4s, VREG24.4s
    add WINPUT10, WINPUT10, WINPUT15 // C+D
    eor VREG41.16b, VREG03.16b, VREG02.16b
    add WINPUT11, WINPUT11, WINPUT12 // C+D
    eor VREG42.16b, VREG13.16b, VREG12.16b
    add WINPUT8, WINPUT8, WINPUT13 // C+D
    eor VREG43.16b, VREG23.16b, VREG22.16b
    add WINPUT9, WINPUT9, WINPUT14 // C+D
    ushr VREG02.4s, VREG41.4s, #25
    eor WINPUT5, WINPUT5, WINPUT10 // B^C
    ushr VREG12.4s, VREG42.4s, #25
    eor WINPUT6, WINPUT6, WINPUT11 // B^C
    ushr VREG22.4s, VREG43.4s, #25
    eor WINPUT7, WINPUT7, WINPUT8 // B^C
    sli VREG02.4s, VREG41.4s, #7
    eor WINPUT4, WINPUT4, WINPUT9 // B^C
    sli VREG12.4s, VREG42.4s, #7
    ror WINPUT4, WINPUT4, #25 // B>>>25
    sli VREG22.4s, VREG43.4s, #7
    ror WINPUT5, WINPUT5, #25 // B>>>25
    ext VREG03.16b, VREG03.16b, VREG03.16b, #8
    ror WINPUT6, WINPUT6, #25 // B>>>25
    ext VREG13.16b, VREG13.16b, VREG13.16b, #8
    ror WINPUT7, WINPUT7, #25 // B>>>25
    ext VREG23.16b, VREG23.16b, VREG23.16b, #8
.endm
.macro CHA256_ROUND_END
// Finalize the three NEON blocks: add the original input state (sigma, key
// halves, and the per-block counter rows) back onto the round output, as
// required by the ChaCha20 block function (RFC 7539, Section 2.3).
    add VREG01.4s, VREG01.4s, VSIGMA.4s // After the cycle is complete, add input.
    add VREG11.4s, VREG11.4s, VSIGMA.4s
    add VREG21.4s, VREG21.4s, VSIGMA.4s
    add VREG02.4s, VREG02.4s, VKEY01.4s // After the cycle is complete, add input.
    add VREG12.4s, VREG12.4s, VKEY01.4s
    add VREG22.4s, VREG22.4s, VKEY01.4s
    add VREG03.4s, VREG03.4s, VKEY02.4s // After the cycle is complete, add input.
    add VREG13.4s, VREG13.4s, VKEY02.4s
    add VREG23.4s, VREG23.4s, VKEY02.4s
    add VREG04.4s, VREG04.4s, VREG52.4s // counter row 1
    add VREG14.4s, VREG14.4s, VREG53.4s // counter row 2
    add VREG24.4s, VREG24.4s, VREG54.4s // counter row 3
.endm
.macro CHA256_WRITE_BACK
// XOR 256 bytes of input with the keystream and store the result:
// the scalar block (XINPUT even regs hold keystream, odd regs hold input
// loaded by CHA64_ROUND_END) plus the three finalized NEON blocks.
// Loads from REGINC and stores to REGOUT are interleaved with the XORs.
    ld1 {VREG41.16b, VREG42.16b, VREG43.16b, VREG44.16b}, [REGINC], #64 // Load 64 bytes.
    eor XINPUT0, XINPUT0, XINPUT1
    eor XINPUT2, XINPUT2, XINPUT3
    eor XINPUT4, XINPUT4, XINPUT5
    eor XINPUT6, XINPUT6, XINPUT7
    eor XINPUT8, XINPUT8, XINPUT9
    stp XINPUT0, XINPUT2, [REGOUT], #16 // Write data.
    eor VREG01.16b, VREG01.16b, VREG41.16b
    stp XINPUT4, XINPUT6, [REGOUT], #16
    eor XINPUT10, XINPUT10, XINPUT11
    eor VREG02.16b, VREG02.16b, VREG42.16b
    eor XINPUT12, XINPUT12, XINPUT13
    eor VREG03.16b, VREG03.16b, VREG43.16b
    eor XINPUT14, XINPUT14, XINPUT15
    stp XINPUT8, XINPUT10, [REGOUT], #16
    eor VREG04.16b, VREG04.16b, VREG44.16b
    ld1 {VREG41.16b, VREG42.16b, VREG43.16b, VREG44.16b}, [REGINC], #64 // Load 64 bytes.
    stp XINPUT12, XINPUT14, [REGOUT], #16
    eor VREG11.16b, VREG11.16b, VREG41.16b
    eor VREG12.16b, VREG12.16b, VREG42.16b
    st1 {VREG01.16b, VREG02.16b, VREG03.16b, VREG04.16b}, [REGOUT], #64 // Write 64 bytes.
    eor VREG13.16b, VREG13.16b, VREG43.16b
    eor VREG14.16b, VREG14.16b, VREG44.16b
    ld1 {VREG01.16b, VREG02.16b, VREG03.16b, VREG04.16b}, [REGINC], #64 // Load 64 bytes.
    st1 {VREG11.16b, VREG12.16b, VREG13.16b, VREG14.16b}, [REGOUT], #64 // Write 64 bytes.
    eor VREG21.16b, VREG21.16b, VREG01.16b
    eor VREG22.16b, VREG22.16b, VREG02.16b
    eor VREG23.16b, VREG23.16b, VREG03.16b
    eor VREG24.16b, VREG24.16b, VREG04.16b
    st1 {VREG21.16b, VREG22.16b, VREG23.16b, VREG24.16b}, [REGOUT], #64 // Write 64 bytes.
.endm
// XOR one 64-byte NEON keystream block (\src1..\src4) with 64 input bytes
// from REGINC and write the result to REGOUT. Used for the 1-3 trailing
// whole blocks after a partial 256-byte batch.
.macro CHA256_WRITE_BACKB src1, src2, src3, src4
    ld1 {VREG41.16b, VREG42.16b, VREG43.16b, VREG44.16b}, [REGINC], #64 // Load 64 bytes.
    eor \src1, \src1, VREG41.16b
    eor \src2, \src2, VREG42.16b
    eor \src3, \src3, VREG43.16b
    eor \src4, \src4, VREG44.16b
    st1 {\src1, \src2, \src3, \src4}, [REGOUT], #64 // Write 64 bytes.
.endm
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chacha20_256block_aarch64.S | Unix Assembly | unknown | 12,601 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
.text
.macro CHA512_EXTA
// Rotate rows d (by 12 bytes) and b (by 4 bytes) of all six NEON blocks into
// diagonal position between the column half and the diagonal half of a
// double round.
    VEXT2 VREG04.16b, VREG14.16b, #12
    VEXT2 VREG24.16b, VREG34.16b, #12
    VEXT2 VREG44.16b, VREG54.16b, #12
    VEXT2 VREG02.16b, VREG12.16b, #4
    VEXT2 VREG22.16b, VREG32.16b, #4
    VEXT2 VREG42.16b, VREG52.16b, #4
.endm
.macro CHA512_EXTB
// Inverse of CHA512_EXTA: rotate rows d (by 4 bytes) and b (by 12 bytes)
// back to column position after the diagonal half of a double round.
    VEXT2 VREG04.16b, VREG14.16b, #4
    VEXT2 VREG24.16b, VREG34.16b, #4
    VEXT2 VREG44.16b, VREG54.16b, #4
    VEXT2 VREG02.16b, VREG12.16b, #12
    VEXT2 VREG22.16b, VREG32.16b, #12
    VEXT2 VREG42.16b, VREG52.16b, #12
.endm
.macro CHA512_SET_VDATA
// Initialize six 64-byte NEON block states from sigma, the two key halves,
// and counter rows. Counters cover offsets +2..+7 relative to the scalar
// block (which handles the current counter): VCUR01..04 hold +2..+5, and
// the last two rows are derived by adding VADDER (holds 4).
    mov VREG01.16b, VSIGMA.16b
    mov VREG11.16b, VSIGMA.16b
    mov VREG21.16b, VSIGMA.16b
    mov VREG31.16b, VSIGMA.16b
    mov VREG41.16b, VSIGMA.16b
    mov VREG51.16b, VSIGMA.16b
    mov VREG02.16b, VKEY01.16b
    mov VREG12.16b, VKEY01.16b
    mov VREG22.16b, VKEY01.16b
    mov VREG32.16b, VKEY01.16b
    mov VREG42.16b, VKEY01.16b
    mov VREG52.16b, VKEY01.16b
    mov VREG03.16b, VKEY02.16b
    mov VREG13.16b, VKEY02.16b
    mov VREG23.16b, VKEY02.16b
    mov VREG33.16b, VKEY02.16b
    mov VREG43.16b, VKEY02.16b
    mov VREG53.16b, VKEY02.16b
    mov VREG04.16b, VCUR01.16b // Counter + 2
    mov VREG14.16b, VCUR02.16b // Counter + 3
    mov VREG24.16b, VCUR03.16b // Counter + 4
    mov VREG34.16b, VCUR04.16b // Counter + 5
    add VREG44.4s, VREG04.4s, VADDER.4s // Counter + 6 = 4 + 2
    add VREG54.4s, VREG14.4s, VADDER.4s // Counter + 7 = 4 + 3
.endm
.macro CHA512_ROUND_END
// Finalize the six NEON blocks: add the original input state back onto the
// round output. Counter rows are rebuilt from VCUR01..04 (restored from the
// stack by the caller) plus VADDER for the last two blocks.
    add VREG01.4s, VREG01.4s, VSIGMA.4s // After the loop is complete, add input.
    add VREG11.4s, VREG11.4s, VSIGMA.4s
    add VREG21.4s, VREG21.4s, VSIGMA.4s
    add VREG31.4s, VREG31.4s, VSIGMA.4s
    add VREG41.4s, VREG41.4s, VSIGMA.4s
    add VREG51.4s, VREG51.4s, VSIGMA.4s
    add VREG02.4s, VREG02.4s, VKEY01.4s // After the loop is complete, add input.
    add VREG12.4s, VREG12.4s, VKEY01.4s
    add VREG22.4s, VREG22.4s, VKEY01.4s
    add VREG32.4s, VREG32.4s, VKEY01.4s
    add VREG42.4s, VREG42.4s, VKEY01.4s
    add VREG52.4s, VREG52.4s, VKEY01.4s
    add VREG03.4s, VREG03.4s, VKEY02.4s // After the loop is complete, add input.
    add VREG13.4s, VREG13.4s, VKEY02.4s
    add VREG23.4s, VREG23.4s, VKEY02.4s
    add VREG33.4s, VREG33.4s, VKEY02.4s
    add VREG43.4s, VREG43.4s, VKEY02.4s
    add VREG53.4s, VREG53.4s, VKEY02.4s
    add VREG44.4s, VREG44.4s, VCUR01.4s // 2
    add VREG54.4s, VREG54.4s, VCUR02.4s // 3
    add VREG04.4s, VREG04.4s, VCUR01.4s // 2
    add VREG14.4s, VREG14.4s, VCUR02.4s // 3
    add VREG24.4s, VREG24.4s, VCUR03.4s // 4
    add VREG34.4s, VREG34.4s, VCUR04.4s // 5
    add VREG44.4s, VREG44.4s, VADDER.4s // 4 + 2
    add VREG54.4s, VREG54.4s, VADDER.4s // 4 + 3
.endm
.macro CHA512_WRITE_BACK
// XOR the six finalized 64-byte NEON keystream blocks (384 bytes) with input
// from REGINC and store to REGOUT, reusing already-flushed VREG registers as
// load buffers. Also restores the saved counter rows from the stack and
// advances them by 8 blocks (VREG21 = VADDER << 1 = 8) for the next batch.
    ld1 {VCUR01.16b, VCUR02.16b, VCUR03.16b, VCUR04.16b}, [REGINC], #64 // Load 64 bytes.
    eor VREG01.16b, VREG01.16b, VCUR01.16b
    eor VREG02.16b, VREG02.16b, VCUR02.16b
    eor VREG03.16b, VREG03.16b, VCUR03.16b
    eor VREG04.16b, VREG04.16b, VCUR04.16b
    ld1 {VCUR01.16b, VCUR02.16b, VCUR03.16b, VCUR04.16b}, [REGINC], #64 // Load 64 bytes.
    st1 {VREG01.16b, VREG02.16b, VREG03.16b, VREG04.16b}, [REGOUT], #64 // Write 64 bytes.
    eor VREG11.16b, VREG11.16b, VCUR01.16b
    eor VREG12.16b, VREG12.16b, VCUR02.16b
    eor VREG13.16b, VREG13.16b, VCUR03.16b
    eor VREG14.16b, VREG14.16b, VCUR04.16b
    ld1 {VREG01.16b, VREG02.16b, VREG03.16b, VREG04.16b}, [REGINC], #64 // Load 64 bytes.
    st1 {VREG11.16b, VREG12.16b, VREG13.16b, VREG14.16b}, [REGOUT], #64 // Write 64 bytes.
    eor VREG21.16b, VREG21.16b, VREG01.16b
    eor VREG22.16b, VREG22.16b, VREG02.16b
    eor VREG23.16b, VREG23.16b, VREG03.16b
    eor VREG24.16b, VREG24.16b, VREG04.16b
    ld1 {VREG11.16b, VREG12.16b, VREG13.16b, VREG14.16b}, [REGINC], #64 // Load 64 bytes.
    st1 {VREG21.16b, VREG22.16b, VREG23.16b, VREG24.16b}, [REGOUT], #64 // Write 64 bytes.
    eor VREG31.16b, VREG31.16b, VREG11.16b
    eor VREG32.16b, VREG32.16b, VREG12.16b
    eor VREG33.16b, VREG33.16b, VREG13.16b
    eor VREG34.16b, VREG34.16b, VREG14.16b
    ld1 {VREG01.16b, VREG02.16b, VREG03.16b, VREG04.16b}, [REGINC], #64 // Load 64 bytes.
    st1 {VREG31.16b, VREG32.16b, VREG33.16b, VREG34.16b}, [REGOUT], #64 // Write 64 bytes.
    shl VREG21.4s, VADDER.4s, #1 // 4 -> 8
    eor VREG41.16b, VREG41.16b, VREG01.16b
    eor VREG42.16b, VREG42.16b, VREG02.16b
    eor VREG43.16b, VREG43.16b, VREG03.16b
    eor VREG44.16b, VREG44.16b, VREG04.16b
    ld1 {VREG11.16b, VREG12.16b, VREG13.16b, VREG14.16b}, [REGINC], #64 // Load 64 bytes.
    st1 {VREG41.16b, VREG42.16b, VREG43.16b, VREG44.16b}, [REGOUT], #64 // Write 64 bytes.
    ldp QCUR01, QCUR02, [sp, #32] // restore counter rows saved before the rounds
    ldp QCUR03, QCUR04, [sp, #64]
    eor VREG51.16b, VREG51.16b, VREG11.16b
    eor VREG52.16b, VREG52.16b, VREG12.16b
    eor VREG53.16b, VREG53.16b, VREG13.16b
    eor VREG54.16b, VREG54.16b, VREG14.16b
    st1 {VREG51.16b, VREG52.16b, VREG53.16b, VREG54.16b}, [REGOUT], #64 // Write 64 bytes.
    add VCUR01.4s, VCUR01.4s, VREG21.4s
    add VCUR02.4s, VCUR02.4s, VREG21.4s
    add VCUR03.4s, VCUR03.4s, VREG21.4s
    add VCUR04.4s, VCUR04.4s, VREG21.4s
.endm
.macro CHA512_ROUND
// One ChaCha round applied simultaneously to the scalar block (via WCHA_*
// helper macros, column half then diagonal half) and to all six NEON blocks.
// Vector rotates: 16 via rev32, 12/8/7 via ushr+sli through VCUR scratch
// registers. The interleaving of scalar and vector ops is deliberate for
// dual issue — do not reorder.
    WCHA_ADD_A_B // a += b
    VADD2 VREG02.4s, VREG01.4s, VREG12.4s, VREG11.4s // a[0,1,2,3] += b[4,5,6,7]
    VADD2 VREG22.4s, VREG21.4s, VREG32.4s, VREG31.4s
    WCHA_EOR_D_A // d ^= a
    VADD2 VREG42.4s, VREG41.4s, VREG52.4s, VREG51.4s
    VEOR2 VREG01.16b, VREG04.16b, VREG11.16b, VREG14.16b // d[12,13,14,15] ^= a[0,1,2,3]
    WCHA_ROR_D #16 // d <<<= 16 ror Cyclic shift right by 16 bits.
    VEOR2 VREG21.16b, VREG24.16b, VREG31.16b, VREG34.16b
    VEOR2 VREG41.16b, VREG44.16b, VREG51.16b, VREG54.16b
    WCHA_ADD_C_D // c += d
    VREV322 VREG04.8h, VREG14.8h // d[12,13,14,15] (#16 inverse).
    VREV322 VREG24.8h, VREG34.8h
    WCHA_EOR_B_C
    VREV322 VREG44.8h, VREG54.8h
    VADD2 VREG04.4s, VREG03.4s, VREG14.4s, VREG13.4s // c[8,9,10,11] += d[12,13,14,15]
    WCHA_ROR_B #20
    VADD2 VREG24.4s, VREG23.4s, VREG34.4s, VREG33.4s
    VADD2 VREG44.4s, VREG43.4s, VREG54.4s, VREG53.4s
    WCHA_ADD_A_B // a += b
    VEORX VREG03.16b, VREG02.16b, VCUR01.16b, VREG13.16b, VREG12.16b, VCUR02.16b // m = b[4,5,6,7] ^ c[8,9,10,11]
    VEORX VREG23.16b, VREG22.16b, VCUR03.16b, VREG33.16b, VREG32.16b, VCUR04.16b
    WCHA_EOR_D_A
    VEORX VREG43.16b, VREG42.16b, VCUR05.16b, VREG53.16b, VREG52.16b, VCUR06.16b
    VUSHR2 VCUR01.4s, VREG02.4s, VCUR02.4s, VREG12.4s, #20 // b[4,5,6,7] = m << 20
    WCHA_ROR_D #24
    VUSHR2 VCUR03.4s, VREG22.4s, VCUR04.4s, VREG32.4s, #20
    VUSHR2 VCUR05.4s, VREG42.4s, VCUR06.4s, VREG52.4s, #20
    WCHA_ADD_C_D // c += d
    VSLI2 VCUR01.4s, VREG02.4s, VCUR02.4s, VREG12.4s, #12 // b[4,5,6,7] = m >> 12
    VSLI2 VCUR03.4s, VREG22.4s, VCUR04.4s, VREG32.4s, #12
    WCHA_EOR_B_C
    VSLI2 VCUR05.4s, VREG42.4s, VCUR06.4s, VREG52.4s, #12
    VADD2 VREG02.4s, VREG01.4s, VREG12.4s, VREG11.4s // a[0,1,2,3] += b[4,5,6,7]
    WCHA_ROR_B #25
    VADD2 VREG22.4s, VREG21.4s, VREG32.4s, VREG31.4s
    VADD2 VREG42.4s, VREG41.4s, VREG52.4s, VREG51.4s
    WCHA_ADD2_A_B
    VEORX VREG04.16b, VREG01.16b, VCUR01.16b, VREG14.16b, VREG11.16b, VCUR02.16b // m = d[12,13,14,15] ^ a[0,1,2,3]
    VEORX VREG24.16b, VREG21.16b, VCUR03.16b, VREG34.16b, VREG31.16b, VCUR04.16b
    WCHA_EOR2_D_A
    VEORX VREG44.16b, VREG41.16b, VCUR05.16b, VREG54.16b, VREG51.16b, VCUR06.16b
    VUSHR2 VCUR01.4s, VREG04.4s, VCUR02.4s, VREG14.4s, #24 // d[12,13,14,15] = m << 24
    WCHA_ROR_D #16
    VUSHR2 VCUR03.4s, VREG24.4s, VCUR04.4s, VREG34.4s, #24
    VUSHR2 VCUR05.4s, VREG44.4s, VCUR06.4s, VREG54.4s, #24
    WCHA_ADD2_C_D
    VSLI2 VCUR01.4s, VREG04.4s, VCUR02.4s, VREG14.4s, #8 // d[12,13,14,15] = m >> 8
    VSLI2 VCUR03.4s, VREG24.4s, VCUR04.4s, VREG34.4s, #8
    WCHA_EOR2_B_C
    VSLI2 VCUR05.4s, VREG44.4s, VCUR06.4s, VREG54.4s, #8
    VADD2 VREG04.4s, VREG03.4s, VREG14.4s, VREG13.4s // c[8,9,10,11] += d[12,13,14,15]
    WCHA_ROR_B #20
    VADD2 VREG24.4s, VREG23.4s, VREG34.4s, VREG33.4s
    VADD2 VREG44.4s, VREG43.4s, VREG54.4s, VREG53.4s
    WCHA_ADD2_A_B
    VEORX VREG03.16b, VREG02.16b, VCUR01.16b, VREG13.16b, VREG12.16b, VCUR02.16b // m = b[4,5,6,7] ^ c[8,9,10,11]
    VEORX VREG23.16b, VREG22.16b, VCUR03.16b, VREG33.16b, VREG32.16b, VCUR04.16b
    WCHA_EOR2_D_A
    VEORX VREG43.16b, VREG42.16b, VCUR05.16b, VREG53.16b, VREG52.16b, VCUR06.16b
    VUSHR2 VCUR01.4s, VREG02.4s, VCUR02.4s, VREG12.4s, #25 // b[4,5,6,7] = m << 25
    WCHA_ROR_D #24
    VUSHR2 VCUR03.4s, VREG22.4s, VCUR04.4s, VREG32.4s, #25
    VUSHR2 VCUR05.4s, VREG42.4s, VCUR06.4s, VREG52.4s, #25
    WCHA_ADD2_C_D
    VSLI2 VCUR01.4s, VREG02.4s, VCUR02.4s, VREG12.4s, #7 // b[4,5,6,7] = m >> 7
    VSLI2 VCUR03.4s, VREG22.4s, VCUR04.4s, VREG32.4s, #7
    WCHA_EOR2_B_C
    VSLI2 VCUR05.4s, VREG42.4s, VCUR06.4s, VREG52.4s, #7
    VEXT2 VREG03.16b, VREG13.16b, #8
    WCHA_ROR_B #25
    VEXT2 VREG23.16b, VREG33.16b, #8
    VEXT2 VREG43.16b, VREG53.16b, #8
.endm
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chacha20_512block_aarch64.S | Unix Assembly | unknown | 10,013 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
.text
.macro CHA64_SET_WDATA
// Unpack the 16-word ChaCha state into 16 scalar working registers
// (WINPUT0..15). Each 64-bit source register holds two packed 32-bit state
// words: the low half is moved directly, the high half extracted with lsr.
    mov WINPUT0, WSIG01
    lsr XINPUT1, XSIG01, #32
    mov WINPUT2, WSIG02
    lsr XINPUT3, XSIG02, #32
    mov WINPUT4, WKEY01
    lsr XINPUT5, XKEY01, #32
    mov WINPUT6, WKEY02
    lsr XINPUT7, XKEY02, #32
    mov WINPUT8, WKEY03
    lsr XINPUT9, XKEY03, #32
    mov WINPUT10, WKEY04
    lsr XINPUT11, XKEY04, #32
    mov WINPUT12, WCOUN1
    lsr XINPUT13, XCOUN1, #32 // 0
    mov WINPUT14, WCOUN2
    lsr XINPUT15, XCOUN2, #32
.endm
.macro CHA64_ROUND_END
// Finalize the scalar block: add the original input state back onto the
// round output, repack the 16 words into eight 64-bit registers (even
// XINPUTs), and load 64 bytes of plaintext into the odd XINPUTs for the
// subsequent XOR in CHA64_WRITE_BACK.
    add WINPUT0, WINPUT0, WSIG01 // Sum of the upper 32 bits and lower 32 bits.
    add XINPUT1, XINPUT1, XSIG01, lsr#32
    add WINPUT2, WINPUT2, WSIG02
    add XINPUT3, XINPUT3, XSIG02, lsr#32
    add WINPUT4, WINPUT4, WKEY01
    add XINPUT5, XINPUT5, XKEY01, lsr#32
    add WINPUT6, WINPUT6, WKEY02
    add XINPUT7, XINPUT7, XKEY02, lsr#32
    add WINPUT8, WINPUT8, WKEY03
    add XINPUT9, XINPUT9, XKEY03, lsr#32
    add WINPUT10, WINPUT10, WKEY04
    add XINPUT11, XINPUT11, XKEY04, lsr#32
    add WINPUT12, WINPUT12, WCOUN1
    add XINPUT13, XINPUT13, XCOUN1, lsr#32
    add WINPUT14, WINPUT14, WCOUN2
    add XINPUT15, XINPUT15, XCOUN2, lsr#32
    add XINPUT0, XINPUT0, XINPUT1, lsl#32 // Combination of upper 32 bits and lower 32 bits.
    add XINPUT2, XINPUT2, XINPUT3, lsl#32 // Combination of upper 32 bits and lower 32 bits.
    ldp XINPUT1, XINPUT3, [REGINC], #16 // Load input.
    add XINPUT4, XINPUT4, XINPUT5, lsl#32 // Combination of upper 32 bits and lower 32 bits.
    add XINPUT6, XINPUT6, XINPUT7, lsl#32 // Combination of upper 32 bits and lower 32 bits.
    ldp XINPUT5, XINPUT7, [REGINC], #16 // Load input.
    add XINPUT8, XINPUT8, XINPUT9, lsl#32 // Combination of upper 32 bits and lower 32 bits.
    add XINPUT10, XINPUT10, XINPUT11, lsl#32 // Combination of upper 32 bits and lower 32 bits.
    ldp XINPUT9, XINPUT11, [REGINC], #16 // Load input.
    add XINPUT12, XINPUT12, XINPUT13, lsl#32 // Combination of upper 32 bits and lower 32 bits.
    add XINPUT14, XINPUT14, XINPUT15, lsl#32 // Combination of upper 32 bits and lower 32 bits.
    ldp XINPUT13, XINPUT15, [REGINC], #16 // Load input.
#ifdef HITLS_BIG_ENDIAN // Special processing is required in big-endian mode.
    rev XINPUT0, XINPUT0
    rev XINPUT2, XINPUT2
    rev XINPUT4, XINPUT4
    rev XINPUT6, XINPUT6
    rev XINPUT8, XINPUT8
    rev XINPUT10, XINPUT10
    rev XINPUT12, XINPUT12
    rev XINPUT14, XINPUT14
#endif
.endm
.macro CHA64_WRITE_BACK
// XOR the packed keystream (even XINPUTs) with the plaintext already loaded
// into the odd XINPUTs by CHA64_ROUND_END, and store 64 bytes to REGOUT.
    eor XINPUT0, XINPUT0, XINPUT1
    eor XINPUT2, XINPUT2, XINPUT3
    eor XINPUT4, XINPUT4, XINPUT5
    eor XINPUT6, XINPUT6, XINPUT7
    eor XINPUT8, XINPUT8, XINPUT9
    stp XINPUT0, XINPUT2, [REGOUT], #16 // Write data.
    eor XINPUT10, XINPUT10, XINPUT11
    stp XINPUT4, XINPUT6, [REGOUT], #16
    eor XINPUT12, XINPUT12, XINPUT13
    eor XINPUT14, XINPUT14, XINPUT15
    stp XINPUT8, XINPUT10, [REGOUT], #16
    stp XINPUT12, XINPUT14, [REGOUT], #16
.endm
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chacha20_64block_aarch64.S | Unix Assembly | unknown | 3,649 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
#include "crypt_arm.h"
#include "chacha20_common_aarch64.S"
#include "chacha20_64block_aarch64.S"
#include "chacha20_256block_aarch64.S"
#include "chacha20_512block_aarch64.S"
.section .rodata
.ADD_LONG:
.long 1,0,0,0
/**
* @Interconnection with the C interface:void CHACHA20_Update(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
* @brief Chacha20 algorithm
* @param ctx [IN] Algorithm context, which is set by the C interface and transferred.
* @param in [IN] Data to be encrypted
* @param out [OUT] Data after encryption
* @param len [IN] Encrypted length
*/
.text
.globl CHACHA20_Update
.type CHACHA20_Update,%function
.align 4
// AArch64 ChaCha20 keystream XOR. Dispatches on block count:
//   >= 8 blocks : 512-byte path (1 scalar + 6 NEON blocks per iteration,
//                 the scalar state is run twice per iteration);
//   >= 2 blocks : 256-byte path (1 scalar + 3 NEON blocks);
//   == 1 block  : pure scalar 64-byte path.
// x0 = ctx state, x1 = in, x2 = out, x3 = len (assumed a multiple of 64 —
// the C caller handles partial blocks; TODO confirm against caller).
CHACHA20_Update:
AARCH64_PACIASP
    lsr REGLEN, REGLEN, #6 // Divided by 64 to calculate how many blocks.
    stp x29, x30, [sp, #-96]! // x29 x30 store sp -96 address sp -=96.
    add x29, sp, #0 // x29 = sp
    stp x19, x20, [sp, #80] // x19 x20 store sp, sp +=16.
    stp x21, x22, [sp, #64]
    cmp REGLEN, #1 // 1
    stp x23, x24, [sp, #48]
    stp x25, x26, [sp, #32]
    stp x27, x28, [sp, #16]
    sub sp, sp, #128+64 // sp -= 192 (scratch for counters + FP regs)
    b.lo .Lchacha_end // Less than 1 block.
    b.eq .Lchacha64 // Equals 1 block.
    adrp x5, .ADD_LONG
    add x5, x5, :lo12:.ADD_LONG // load(1, 0, 0, 0)
    cmp REGLEN, #8 // >= 512(64*8)
#ifdef HITLS_BIG_ENDIAN
    ldp XSIG01, XSIG02, [x0]
    ld1 {VSIGMA.4s}, [x0], #16 // {sima0, sima1, key0, key1, key3, key4, counter1, counter2}
    ldp XKEY01, XKEY02, [x0]
    ldp XKEY03, XKEY04, [x0, #16]
    ld1 {VKEY01.4s, VKEY02.4s}, [x0], #32
    ldp XCOUN1, XCOUN2, [x0]
    ld1 {VCOUN0.4s}, [x0]
    // Processing when the big-endian machine is loaded:
    // swap word halves so per-word arithmetic matches little-endian layout.
    ror XCOUN1, XCOUN1, #32
    ror XCOUN2, XCOUN2, #32
    ror XSIG01, XSIG01, #32
    ror XSIG02, XSIG02, #32
    add WINPUT2, WCOUN1, w3
    ror XKEY01, XKEY01, #32
    ror XKEY02, XKEY02, #32
    ror XKEY03, XKEY03, #32
    ror XKEY04, XKEY04, #32
    str WINPUT2, [x0]
#else
    ldp XSIG01, XSIG02, [x0]
    ld1 {VSIGMA.4s}, [x0], #16 // {sima0, sima1, key0, key1, key3, key4, counter1, counter2}
    ldp XKEY01, XKEY02, [x0]
    ldp XKEY03, XKEY04, [x0, #16]
    ld1 {VKEY01.4s, VKEY02.4s}, [x0], #32
    ldp XCOUN1, XCOUN2, [x0]
    ld1 {VCOUN0.4s}, [x0]
    add x6, XCOUN1, REGLEN
    str x6, [x0] // Write back the counter.
#endif
    b.lo .Lchacha256 // < 512
    stp QCUR05, QCUR06, [sp, #0] // Write sigma key1 to SP.
    ld1 {VADDER.4s}, [x5] // Load ADDR.
    add VCUR01.4s, VCOUN0.4s, VADDER.4s // 0
    add VCUR01.4s, VCUR01.4s, VADDER.4s // +2
    add VCUR02.4s, VCUR01.4s, VADDER.4s // +3
    add VCUR03.4s, VCUR02.4s, VADDER.4s // +4
    add VCUR04.4s, VCUR03.4s, VADDER.4s // +5
    shl VADDER.4s, VADDER.4s, #2 // 4
    stp d8, d9,[sp,#128+0] // Meet ABI requirements (callee-saved v8-v15).
    stp d10, d11,[sp,#128+16]
    stp d12, d13,[sp,#128+32]
    stp d14, d15,[sp,#128+48]
// 8 block
.Loop_512_start:
    cmp REGLEN, #8
    b.lo .L512ToChacha256 // Less than 512.
    CHA64_SET_WDATA // General-purpose register 1 x 64 bytes.
    CHA512_SET_VDATA // Wide register 6 x 64 bytes.
    stp QCUR01, QCUR02, [sp, #32] // Write counter 0, 1, 2 3 to sp.
    stp QCUR03, QCUR04, [sp, #64]
    mov x4, #5
    sub REGLEN, REGLEN, #8 // Process 512 at a time.
.Loop_512_a_run:
    // 5 iterations x 2 rounds = 10 double rounds (20 ChaCha rounds).
    sub x4, x4, #1
    CHA512_ROUND
    CHA512_EXTA
    CHA512_ROUND
    CHA512_EXTB
    cbnz x4, .Loop_512_a_run
    CHA64_ROUND_END // Add to input after the loop is complete.
    CHA64_WRITE_BACK // 512 Write 64 bytes in the first half round.
    add XCOUN1, XCOUN1, #1 // +1
    CHA64_SET_WDATA // Resetting scalar state for the second scalar block.
    mov x4, #5
.Loop_512_b_run:
    sub x4, x4, #1
    CHA512_ROUND
    CHA512_EXTA
    CHA512_ROUND
    CHA512_EXTB
    cbnz x4, .Loop_512_b_run
    CHA64_ROUND_END // Add to input after the loop is complete.
    CHA64_WRITE_BACK // 512 Write 64 bytes in the first half round.
    add XCOUN1, XCOUN1, #7 // +7 (total advance of 8 per iteration)
    ldp QCUR05, QCUR06, [sp, #0] // Restore sigma and key1.
    ldp QCUR01, QCUR02, [sp, #32] // Restore counter 0 1 2 4.
    ldp QCUR03, QCUR04, [sp, #64]
    CHA512_ROUND_END // Add to input after the loop is complete.
    CHA512_WRITE_BACK // Write back data.
    b .Loop_512_start // return start.
// 1 block
.Lchacha64:
#ifdef HITLS_BIG_ENDIAN
    ldp XCOUN1, XCOUN2, [x0, #48]
    ldp XSIG01, XSIG02, [x0]
    ldp XKEY01, XKEY02, [x0, #16]
    // Processing when the big-endian machine is loaded
    ror XCOUN1, XCOUN1, #32
    ror XCOUN2, XCOUN2, #32
    ror XSIG01, XSIG01, #32
    ror XSIG02, XSIG02, #32
    ldp XKEY03, XKEY04, [x0, #32]
    add WINPUT0, WCOUN1, w3
    ror XKEY01, XKEY01, #32
    ror XKEY02, XKEY02, #32
    ror XKEY03, XKEY03, #32
    ror XKEY04, XKEY04, #32
    str WINPUT0, [x0, #48]
#else
    ldp XCOUN1, XCOUN2, [x0, #48]
    ldp XSIG01, XSIG02, [x0]
    ldp XKEY01, XKEY02, [x0, #16]
    add XINPUT0, XCOUN1, REGLEN
    ldp XKEY03, XKEY04, [x0, #32]
    str XINPUT0, [x0, #48] // Write data.
#endif
.Loop_64_start:
    CHA64_SET_WDATA // General-purpose register, 1x64byte.
    mov x4, #10 // 10 double rounds = 20 ChaCha rounds.
.Loop_64_run:
    sub x4, x4, #1
    WCHA_ADD_A_B // a += b
    WCHA_EOR_D_A // d ^= a
    WCHA_ROR_D #16 // d <<<= 16 ror Cyclic shift right by 16 bits.
    WCHA_ADD_C_D // c += d
    WCHA_EOR_B_C
    WCHA_ROR_B #20
    WCHA_ADD_A_B // a += b
    WCHA_EOR_D_A
    WCHA_ROR_D #24
    WCHA_ADD_C_D // c += d
    WCHA_EOR_B_C
    WCHA_ROR_B #25
    WCHA_ADD2_A_B
    WCHA_EOR2_D_A
    WCHA_ROR_D #16
    WCHA_ADD2_C_D
    WCHA_EOR2_B_C
    WCHA_ROR_B #20
    WCHA_ADD2_A_B
    WCHA_EOR2_D_A
    WCHA_ROR_D #24
    WCHA_ADD2_C_D
    WCHA_EOR2_B_C
    WCHA_ROR_B #25
    cbnz x4, .Loop_64_run
    CHA64_ROUND_END // Add to input after the loop is complete.
    subs REGLEN, REGLEN, #1
    CHA64_WRITE_BACK // Write 64 bytes.
    add XCOUN1, XCOUN1, #1
    b.le .Lchacha_end
    b .Loop_64_start
.L512ToChacha256:
    // Tail of the 512 path: restore callee-saved FP regs, then fall through
    // to the 256 path for the remaining 1-7 blocks.
    ldp d8,d9,[sp,#128+0] // Meet ABI requirements.
    ldp d10,d11,[sp,#128+16]
    ldp d12,d13,[sp,#128+32]
    ldp d14,d15,[sp,#128+48]
    cbz REGLEN, .Lchacha_end // The length is 0.
    ushr VADDER.4s, VADDER.4s, #2 // 4->1
    sub VREG52.4s, VCUR01.4s, VADDER.4s // 10-1 = 9 8
    sub VREG53.4s, VCUR02.4s, VADDER.4s // 11-1 = 10
    sub VREG54.4s, VCUR03.4s, VADDER.4s // 12-1 = 11
    shl VCUR01.4s, VADDER.4s, #2 // 2 -> 4
    b .Loop_256_start
// 4 block
.Lchacha256:
    ld1 {VADDER.4s}, [x5] // Load ADDR.
    mov VREG51.16b, VCOUN0.16b // 0
    add VREG52.4s, VCOUN0.4s, VADDER.4s // 1
    add VREG53.4s, VREG52.4s, VADDER.4s // 2
    add VREG54.4s, VREG53.4s, VADDER.4s // 3
    shl VCUR01.4s, VADDER.4s, #2 // 4
.Loop_256_start:
    CHA64_SET_WDATA // General-purpose register 16 byte.
    CHA256_SET_VDATA // Neon register 3 * 48 byte.
    mov x4, #10 // 10 double rounds = 20 ChaCha rounds.
.Loop_256_run:
    sub x4, x4, #1
    CHA256_ROUND_A
    VEXT2 VREG04.16b, VREG14.16b, #12
    VEXT2 VREG24.16b, VREG34.16b, #12
    VEXT2 VREG02.16b, VREG12.16b, #4
    VEXT2 VREG22.16b, VREG32.16b, #4
    CHA256_ROUND_B
    VEXT2 VREG04.16b, VREG14.16b, #4
    VEXT2 VREG24.16b, VREG34.16b, #4
    VEXT2 VREG02.16b, VREG12.16b, #12
    VEXT2 VREG22.16b, VREG32.16b, #12
    cbnz x4, .Loop_256_run
    subs REGLEN, REGLEN, #4 // One-time processing 256.
    CHA256_ROUND_END
    b.lo .Lchacha_less_than_256 // < 0
    CHA64_ROUND_END
    CHA256_WRITE_BACK // Write back data.
    b.le .Lchacha_end // = 0
    add XCOUN1, XCOUN1, #4 // Counter+4.
    add VREG52.4s, VREG52.4s, VCUR01.4s // Counter+4.
    add VREG53.4s, VREG53.4s, VCUR01.4s
    add VREG54.4s, VREG54.4s, VCUR01.4s
    b .Loop_256_start
.Lchacha_less_than_256:
    // 1-3 whole blocks remain: emit the scalar block first, then one NEON
    // block at a time.
    add REGLEN, REGLEN, #4
    cmp REGLEN, #1
    b.lo .Lchacha_end // <= 64 byte.
    CHA64_ROUND_END
    CHA64_WRITE_BACK
    sub REGLEN, REGLEN, #1
    cmp REGLEN, #1
    b.lo .Lchacha_end
    CHA256_WRITE_BACKB VREG01.16b, VREG02.16b, VREG03.16b, VREG04.16b
    sub REGLEN, REGLEN, #1
    cmp REGLEN, #1
    b.lo .Lchacha_end
    CHA256_WRITE_BACKB VREG11.16b, VREG12.16b, VREG13.16b, VREG14.16b
.Lchacha_end:
    // Scrub registers that held key material before returning.
    eor XKEY01, XKEY01, XKEY01
    eor XKEY02, XKEY02, XKEY02
    eor XKEY03, XKEY03, XKEY03
    eor XKEY04, XKEY04, XKEY04
    // NOTE(review): the line below duplicates the XKEY04 clear above —
    // looks like a copy-paste slip; XCOUN1 (x28) is not scrubbed here.
    // Harmless (redundant instruction), but worth confirming intent.
    eor XKEY04, XKEY04, XKEY04
    eor XCOUN2, XCOUN2, XCOUN2
    eor VKEY01.16b, VKEY01.16b, VKEY01.16b
    eor VKEY02.16b, VKEY02.16b, VKEY02.16b
    eor VCUR01.16b, VCUR01.16b, VCUR01.16b
    ldp x19, x20, [x29, #80]
    add sp, sp, #128+64
    ldp x21, x22, [x29, #64]
    ldp x23, x24, [x29, #48]
    ldp x25, x26, [x29, #32]
    ldp x27, x28, [x29, #16]
    ldp x29, x30, [sp], #96
.Labort:
AARCH64_AUTIASP
    ret
.size CHACHA20_Update,.-CHACHA20_Update
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chacha20_aarch64.S | Unix Assembly | unknown | 10,460 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
.text
// Input ctx、in、out、len.
// Register aliases shared by all ChaCha20 AArch64 paths.
// Function arguments: ctx, in, out, len.
REGCTX .req x0
REGINC .req x1
REGOUT .req x2
REGLEN .req x3
// 64-byte input, temporarily loaded register(0 ~ 15).
// Scalar working state: WINPUTn is the 32-bit view, XINPUTn the 64-bit view
// of the same register.
WINPUT0 .req w5
XINPUT0 .req x5
WINPUT1 .req w6
XINPUT1 .req x6
WINPUT2 .req w7
XINPUT2 .req x7
WINPUT3 .req w8
XINPUT3 .req x8
WINPUT4 .req w9
XINPUT4 .req x9
WINPUT5 .req w10
XINPUT5 .req x10
WINPUT6 .req w11
XINPUT6 .req x11
WINPUT7 .req w12
XINPUT7 .req x12
WINPUT8 .req w13
XINPUT8 .req x13
WINPUT9 .req w14
XINPUT9 .req x14
WINPUT10 .req w15
XINPUT10 .req x15
WINPUT11 .req w16
XINPUT11 .req x16
WINPUT12 .req w17
XINPUT12 .req x17
WINPUT13 .req w19
XINPUT13 .req x19
WINPUT14 .req w20
XINPUT14 .req x20
WINPUT15 .req w21
XINPUT15 .req x21
// NEON block states: six 64-byte blocks (rows VREGx1..x4 per block) used by
// the 512-byte path; the 256-byte path uses the first three.
VREG01 .req v0
VREG02 .req v1
VREG03 .req v2
VREG04 .req v3
VREG11 .req v4
VREG12 .req v5
VREG13 .req v6
VREG14 .req v7
VREG21 .req v8
VREG22 .req v9
VREG23 .req v10
VREG24 .req v11
VREG31 .req v12
VREG32 .req v13
VREG33 .req v14
VREG34 .req v15
VREG41 .req v16
VREG42 .req v17
VREG43 .req v18
VREG44 .req v19
VREG51 .req v20
VREG52 .req v21
VREG53 .req v22
VREG54 .req v23
// Scratch registers, used for temporary calculation (Q views for stp/ldp).
VCUR01 .req v24
QCUR01 .req q24
VCUR02 .req v25
QCUR02 .req q25
VCUR03 .req v26
QCUR03 .req q26
VCUR04 .req v27
QCUR04 .req q27
VCUR05 .req v28
QCUR05 .req q28
VCUR06 .req v29
QCUR06 .req q29
// NEON counter, sigma, key and adder registers.
// Note: VCOUN0/VSIGMA/VKEY01 overlap VCUR04/05/06 — the scratch aliases
// are only used while these inputs are saved on the stack.
VCOUN0 .req v27
VSIGMA .req v28
VKEY01 .req v29
VKEY02 .req v30
VADDER .req v31
// Scalar counter, sigma and key registers (packed, two words each).
WSIG01 .req w22
XSIG01 .req x22
WSIG02 .req w23
XSIG02 .req x23
WKEY01 .req w24
XKEY01 .req x24
WKEY02 .req w25
XKEY02 .req x25
WKEY03 .req w26
XKEY03 .req x26
WKEY04 .req w27
XKEY04 .req x27
WCOUN1 .req w28
XCOUN1 .req x28
WCOUN2 .req w30
XCOUN2 .req x30
/* Pairwise vector add: dest += src, dest2 += src2. */
.macro VADD2 src, dest, src2, dest2
add \dest, \dest, \src
add \dest2, \dest2, \src2
.endm
/* Pairwise vector XOR: dest ^= src, dest2 ^= src2. */
.macro VEOR2 src, dest, src2, dest2
eor \dest, \dest, \src
eor \dest2, \dest2, \src2
.endm
/* Three-operand pairwise XOR: dest = srca ^ srcb, dest2 = srca2 ^ srcb2. */
.macro VEORX srca, srcb, dest, srca2, srcb2, dest2
eor \dest, \srcb, \srca
eor \dest2, \srcb2, \srca2
.endm
/* Reverse the elements within each 32-bit word of both registers; with .8h
 * arrangement operands this realizes the quarter-round 16-bit rotation. */
.macro VREV322 dest, dest2
rev32 \dest, \dest
rev32 \dest2, \dest2
.endm
/* Pairwise unsigned shift right by \count (combined with VSLI2 to form a rotate). */
.macro VUSHR2 src, dest, src2, dest2, count
ushr \dest, \src, \count
ushr \dest2, \src2, \count
.endm
/* Pairwise shift-left-and-insert by \count (completes the rotate begun by VUSHR2). */
.macro VSLI2 src, dest, src2, dest2, count
sli \dest, \src, \count
sli \dest2, \src2, \count
.endm
/* Rotate the byte lanes of both registers by \count bytes (self ext). */
.macro VEXT2 src, src2, count
ext \src, \src, \src, \count
ext \src2, \src2, \src2, \count
.endm
/* Scalar (GPR) ChaCha column round, step: a += b for the four columns. */
.macro WCHA_ADD_A_B
add WINPUT0, WINPUT0, WINPUT4
add WINPUT1, WINPUT1, WINPUT5
add WINPUT2, WINPUT2, WINPUT6
add WINPUT3, WINPUT3, WINPUT7
.endm
/* Column round, step: d ^= a for the four columns. */
.macro WCHA_EOR_D_A
eor WINPUT12, WINPUT12, WINPUT0
eor WINPUT13, WINPUT13, WINPUT1
eor WINPUT14, WINPUT14, WINPUT2
eor WINPUT15, WINPUT15, WINPUT3
.endm
/* Column round, step: rotate the four d-words right by \count. */
.macro WCHA_ROR_D count
ror WINPUT12, WINPUT12, \count
ror WINPUT13, WINPUT13, \count
ror WINPUT14, WINPUT14, \count
ror WINPUT15, WINPUT15, \count
.endm
/* Column round, step: c += d for the four columns. */
.macro WCHA_ADD_C_D
add WINPUT8, WINPUT8, WINPUT12
add WINPUT9, WINPUT9, WINPUT13
add WINPUT10, WINPUT10, WINPUT14
add WINPUT11, WINPUT11, WINPUT15
.endm
/* Column round, step: b ^= c for the four columns. */
.macro WCHA_EOR_B_C
eor WINPUT4, WINPUT4, WINPUT8
eor WINPUT5, WINPUT5, WINPUT9
eor WINPUT6, WINPUT6, WINPUT10
eor WINPUT7, WINPUT7, WINPUT11
.endm
/* Column round, step: rotate the four b-words right by \count. */
.macro WCHA_ROR_B count
ror WINPUT4, WINPUT4, \count
ror WINPUT5, WINPUT5, \count
ror WINPUT6, WINPUT6, \count
ror WINPUT7, WINPUT7, \count
.endm
/* Diagonal round, step: a += b along the shifted diagonals. */
.macro WCHA_ADD2_A_B
add WINPUT0, WINPUT0, WINPUT5
add WINPUT1, WINPUT1, WINPUT6
add WINPUT2, WINPUT2, WINPUT7
add WINPUT3, WINPUT3, WINPUT4
.endm
/* Diagonal round, step: d ^= a along the shifted diagonals. */
.macro WCHA_EOR2_D_A
eor WINPUT15, WINPUT15, WINPUT0
eor WINPUT12, WINPUT12, WINPUT1
eor WINPUT13, WINPUT13, WINPUT2
eor WINPUT14, WINPUT14, WINPUT3
.endm
/* Diagonal round, step: c += d along the shifted diagonals. */
.macro WCHA_ADD2_C_D
add WINPUT10, WINPUT10, WINPUT15
add WINPUT11, WINPUT11, WINPUT12
add WINPUT8, WINPUT8, WINPUT13
add WINPUT9, WINPUT9, WINPUT14
.endm
/* Diagonal round, step: b ^= c along the shifted diagonals. */
.macro WCHA_EOR2_B_C
eor WINPUT5, WINPUT5, WINPUT10
eor WINPUT6, WINPUT6, WINPUT11
eor WINPUT7, WINPUT7, WINPUT8
eor WINPUT4, WINPUT4, WINPUT9
.endm
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chacha20_common_aarch64.S | Motorola 68K Assembly | unknown | 4,831 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
/* --------------AVX2 Overall design-----------------
* 64->%xmm0-%xmm7 No need to use stack memory
* 128->%xmm0-%xmm11 No need to use stack memory
* 256->%xmm0-%xmm15 Use 256 + 64 bytes of stack memory
* 512->%ymm0-%ymm15 Use 512 + 128 bytes of stack memory
*
--------------AVX512 Overall design-----------------
* 64->%xmm0-%xmm7 No need to use stack memory
* 128->%xmm0-%xmm11 No need to use stack memory
* 256->%xmm0-%xmm31 Use 64-byte stack memory
* 512->%ymm0-%ymm31 Use 128-byte stack memory
* 1024->%zmm0-%zmm31 Use 256-byte stack memory
*/
/*************************************************************************************
* AVX2/AVX512 Generic Instruction Set Using Macros
*************************************************************************************/
/* Load the 64-byte ChaCha state from \adr into four 128-bit rows (xmm). */
.macro LOAD_STATE s0 s1 s2 s3 adr
vmovdqu (\adr), \s0 // state[0-3]
vmovdqu 16(\adr), \s1 // state[4-7]
vmovdqu 32(\adr), \s2 // state[8-11]
vmovdqu 48(\adr), \s3 // state[12-15]
.endm
/* Load the state broadcast into both 128-bit lanes of four ymm registers,
 * so each register carries the same row for two blocks. */
.macro LOAD_512_STATE s0 s1 s2 s3 adr
vbroadcasti128 (\adr), \s0 // state[0-3]
vbroadcasti128 16(\adr), \s1 // state[4-7]
vbroadcasti128 32(\adr), \s2 // state[8-11]
vbroadcasti128 48(\adr), \s3 // state[12-15]
.endm
/*
* %xmm0-15, %ymm0-15 MATRIX TO STATE
* IN: s0 s1 s2 s3 cur1 cur2
* OUT: s0 s3 cur1 cur2
* xmm:
* {A0 B0 C0 D0} => {A0 A1 A2 A3}
* {A1 B1 C1 D1} {B0 B1 B2 B3}
* {A2 B2 C2 D2} {C0 C1 C2 C3}
* {A3 B3 C3 D3} {D0 D1 D2 D3}
* ymm:
* {A0 B0 C0 D0 E0 F0 G0 H0} => {A0 A1 A2 A3 E0 E1 E2 E3}
* {A1 B1 C1 D1 E1 F1 G1 H1} {B0 B1 B2 B3 F0 F1 F2 F3}
* {A2 B2 C2 D2 E2 F2 G2 H2} {C0 C1 C2 C3 G0 G1 G2 G3}
* {A3 B3 C3 D3 E3 F3 G3 H3} {D0 D1 D2 D3 H0 H1 H2 H3}
* zmm:
* {A0 B0 C0 D0 E0 F0 G0 H0 I0 J0 K0 L0 M0 N0 O0 P0} => {A0 A1 A2 A3 E0 E1 E2 E3 I0 I1 I2 I3 M0 M1 M2 M3}
* {A1 B1 C1 D1 E1 F1 G1 H1 I1 J1 K1 L1 M1 N1 O1 P1} {B0 B1 B2 B3 F0 F1 F2 F3 J0 J1 J2 J3 N0 N1 N2 N3}
* {A2 B2 C2 D2 E2 F2 G2 H2 I2 J2 K2 L2 M2 N2 O2 P2} {C0 C1 C2 C3 G0 G1 G2 G3 K0 K1 K2 K3 O0 O1 O2 O3}
* {A3 B3 C3 D3 E3 F3 G3 H3 I3 J3 K3 L3 M3 N3 O3 P3} {D0 D1 D2 D3 H0 H1 H2 H3 L0 L1 L2 L3 P0 P1 P2 P3}
*/
.macro MATRIX_TO_STATE s0 s1 s2 s3 cur1 cur2
// 4x4 dword transpose (per 128-bit lane) via unpack low/high dword then qword.
vpunpckldq \s1, \s0, \cur1 // interleave low dwords of s0/s1
vpunpckldq \s3, \s2, \cur2 // interleave low dwords of s2/s3
vpunpckhdq \s1, \s0, \s1 // interleave high dwords of s0/s1
vpunpckhdq \s3, \s2, \s2 // interleave high dwords of s2/s3
vpunpcklqdq \cur2, \cur1, \s0 // transposed row 0
vpunpckhqdq \cur2, \cur1, \s3 // transposed row 1
vpunpcklqdq \s2, \s1, \cur1 // transposed row 2
vpunpckhqdq \s2, \s1, \cur2 // transposed row 3
.endm
/*************************************************************************************
* AVX2 instruction set use macros
*************************************************************************************/
/* XOR 64 bytes of keystream (s0-s3) against the input stream, store the
 * result, and advance both pointers by 64 (clobbers s0-s3). */
.macro WRITEBACK_64_AVX2 inpos outpos s0 s1 s2 s3
vpxor (\inpos), \s0, \s0
vpxor 16(\inpos), \s1, \s1
vpxor 32(\inpos), \s2, \s2
vpxor 48(\inpos), \s3, \s3
vmovdqu \s0, (\outpos) // write back output
vmovdqu \s1, 16(\outpos)
vmovdqu \s2, 32(\outpos)
vmovdqu \s3, 48(\outpos)
add $64, \inpos
add $64, \outpos
.endm
/*
* Converts a state into a matrix.
* %xmm0-15 %ymm0-15 STATE TO MATRIX
* s0-s15:Corresponding to 16 wide-bit registers,adr:counter Settings; base:address of the data storage stack;
* per:Register bit width,Byte representation(16、32)
*/
.macro STATE_TO_MATRIX s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 base per adr
// Broadcast each state word into its own register (word i -> \si), add the
// per-block counter offsets \adr to word 12, and spill the words to the stack
// at \base so the round loop can reload them. Stores are interleaved with the
// shuffles to hide latency; s0-s3 are overwritten last since they are sources.
vpshufd $0b00000000, \s3, \s12
vpshufd $0b01010101, \s3, \s13
vpaddd \adr, \s12, \s12 // 0, 1, 2, 3, 4, 5, 6 ,7
vmovdqa \s12, \base+12*\per(%rsp)
vpshufd $0b10101010, \s3, \s14
vmovdqa \s13, \base+13*\per(%rsp)
vpshufd $0b11111111, \s3, \s15
vmovdqa \s14, \base+14*\per(%rsp)
vpshufd $0b00000000, \s2, \s8
vmovdqa \s15, \base+15*\per(%rsp)
vpshufd $0b01010101, \s2, \s9
vmovdqa \s8, \base+8*\per(%rsp)
vpshufd $0b10101010, \s2, \s10
vmovdqa \s9, \base+9*\per(%rsp)
vpshufd $0b11111111, \s2, \s11
vmovdqa \s10, \base+10*\per(%rsp)
vpshufd $0b00000000, \s1, \s4
vmovdqa \s11, \base+11*\per(%rsp)
vpshufd $0b01010101, \s1, \s5
vmovdqa \s4, \base+4*\per(%rsp)
vpshufd $0b10101010, \s1, \s6
vmovdqa \s5, \base+5*\per(%rsp)
vpshufd $0b11111111, \s1, \s7
vmovdqa \s6, \base+6*\per(%rsp)
vpshufd $0b11111111, \s0, \s3
vmovdqa \s7, \base+7*\per(%rsp)
vpshufd $0b10101010, \s0, \s2
vmovdqa \s3, \base+3*\per(%rsp)
vpshufd $0b01010101, \s0, \s1
vmovdqa \s2, \base+2*\per(%rsp)
vpshufd $0b00000000, \s0, \s0
vmovdqa \s1, \base+1*\per(%rsp)
vmovdqa \s0, \base(%rsp)
.endm
/*
* %xmm0-15 %ymm0-15 LOAD MATRIX
*/
.macro LOAD_MATRIX s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 base per adr
// Reload a previously spilled matrix from the stack at \base, bump the block
// counters (word 12) by \adr, and write the updated counters back.
vmovdqa \base(%rsp), \s0
vmovdqa \base+1*\per(%rsp), \s1
vmovdqa \base+2*\per(%rsp), \s2
vmovdqa \base+3*\per(%rsp), \s3
vmovdqa \base+4*\per(%rsp), \s4
vmovdqa \base+5*\per(%rsp), \s5
vmovdqa \base+6*\per(%rsp), \s6
vmovdqa \base+7*\per(%rsp), \s7
vmovdqa \base+8*\per(%rsp), \s8
vmovdqa \base+9*\per(%rsp), \s9
vmovdqa \base+10*\per(%rsp), \s10
vmovdqa \base+11*\per(%rsp), \s11
vmovdqa \base+12*\per(%rsp), \s12
vmovdqa \base+13*\per(%rsp), \s13
vpaddd \adr, \s12, \s12 // add 8, 8, 8, 8, 8, 8, 8, 8 or 4, 4, 4, 4
vmovdqa \base+14*\per(%rsp), \s14
vmovdqa \base+15*\per(%rsp), \s15
vmovdqa \s12, \base+12*\per(%rsp)
.endm
/*
* %xmm0-15(256) %ymm0-15(512) Loop
*/
.macro CHACHA20_LOOP s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 base per A8 ror16 ror8
// One ChaCha double round over a 16-register matrix. There are only 16
// vector registers but scratch is also needed, so s8/s9 are spilled to the
// stack slot \base(\A8) while s10/s11 are live, and swapped back mid-round.
/* 0 = 0 + 4, 12 = (12 ^ 0) >>> 16 | 8 = 8 + 12, 4 = (4 ^ 8) >>> 12 |
 * 0 = 0 + 4, 12 = (12 ^ 0) >>> 8 | 8 = 8 + 12, 4 = (4 ^ 8) >>> 7
 * 1 = 1 + 5, 13 = (13 ^ 1) >>> 16 | 9 = 9 + 13, 5 = (5 ^ 9) >>> 12 |
 * 1 = 1 + 5, 13 = (13 ^ 1) >>> 8 | 9 = 9 + 13, 5 = (5 ^ 9) >>> 7
 */
COLUM_QUARTER_AVX_0 \s0 \s4 \s12 \s1 \s5 \s13 (\ror16)
COLUM_QUARTER_AVX_1 \s8 \s12 \s4 \s9 \s13 \s5 \s10 \s11 $20 $12
COLUM_QUARTER_AVX_0 \s0 \s4 \s12 \s1 \s5 \s13 (\ror8)
COLUM_QUARTER_AVX_1 \s8 \s12 \s4 \s9 \s13 \s5 \s10 \s11 $25 $7
vmovdqa \s8, \base(\A8) // spill s8/s9, reload s10/s11
vmovdqa \s9, \base+\per(\A8)
vmovdqa \base+2*\per(\A8), \s10
vmovdqa \base+3*\per(\A8), \s11
/* 2 = 2 + 6, 14 = (14 ^ 2) >>> 16 | 10 = 10 + 14, 6 = (6 ^ 10)>>> 12 |
 * 2 = 2 + 6, 14 = (14 ^ 2) >>> 8 | 10 = 10 + 14, 6 = (6 ^ 10)>>> 7
 * 3 = 3 + 7, 15 = (15 ^ 3) >>> 16 | 11 = 11 + 15, 7 = (7 ^ 11)>>> 12 |
 * 3 = 3 + 7, 15 = (15 ^ 3) >>> 8 | 11 = 11 + 15, 7 = (7 ^ 11)>>> 7
 */
COLUM_QUARTER_AVX_0 \s2 \s6 \s14 \s3 \s7 \s15 (\ror16)
COLUM_QUARTER_AVX_1 \s10 \s14 \s6 \s11 \s15 \s7 \s8 \s9 $20 $12
COLUM_QUARTER_AVX_0 \s2 \s6 \s14 \s3 \s7 \s15 (\ror8)
COLUM_QUARTER_AVX_1 \s10 \s14 \s6 \s11 \s15 \s7 \s8 \s9 $25 $7
/* 0 = 0 + 5, 15 = (15 ^ 0) >>> 16 | 10 = 10 + 15, 5 = (5 ^ 10) >>> 12 |
 * 0 = 0 + 5, 15 = (15 ^ 0) >>> 8 | 10 = 10 + 15, 5 = (5 ^ 10) >>> 7
 * 1 = 1 + 6, 12 = (12 ^ 1) >>> 16 | 11 = 11 + 12, 6 = (6 ^ 11) >>> 12 |
 * 1 = 1 + 6, 12 = (12 ^ 1) >>> 8 | 11 = 11 + 12, 6 = (6 ^ 11) >>> 7
 */
COLUM_QUARTER_AVX_0 \s0 \s5 \s15 \s1 \s6 \s12 (\ror16)
COLUM_QUARTER_AVX_1 \s10 \s15 \s5 \s11 \s12 \s6 \s8 \s9 $20 $12
COLUM_QUARTER_AVX_0 \s0 \s5 \s15 \s1 \s6 \s12 (\ror8)
COLUM_QUARTER_AVX_1 \s10 \s15 \s5 \s11 \s12 \s6 \s8 \s9 $25 $7
vmovdqa \s10, \base+2*\per(\A8) // spill s10/s11, reload s8/s9
vmovdqa \s11, \base+3*\per(\A8)
vmovdqa \base(\A8), \s8
vmovdqa \base+\per(\A8), \s9
/* 2 = 2 + 7, 13 = (13 ^ 2) >>> 16 | 8 = 8 + 13, 7 = (7 ^ 8)>>> 12 |
 * 2 = 2 + 7, 13 = (13 ^ 2) >>> 8 | 8 = 8 + 13, 7 = (7 ^ 8)>>> 7
 * 3 = 3 + 4, 14 = (14 ^ 3) >>> 16 | 9 = 9 + 14, 4 = (4 ^ 9)>>> 12 |
 * 3 = 3 + 4, 14 = (14 ^ 3) >>> 8 | 9 = 9 + 14, 4 = (4 ^ 9)>>> 7
 */
COLUM_QUARTER_AVX_0 \s2 \s7 \s13 \s3 \s4 \s14 (\ror16)
COLUM_QUARTER_AVX_1 \s8 \s13 \s7 \s9 \s14 \s4 \s10 \s11 $20 $12
COLUM_QUARTER_AVX_0 \s2 \s7 \s13 \s3 \s4 \s14 (\ror8)
COLUM_QUARTER_AVX_1 \s8 \s13 \s7 \s9 \s14 \s4 \s10 \s11 $25 $7
.endm
/*
* %xmm0-15 %ymm0-15 QUARTER macro(used when cyclically moving right by 16 or 8)
*/
/* Half quarter-round on two column pairs: a += b; c = rotate(c ^ a).
 * Rotation by 16 or 8 bits is done with a vpshufb byte-permutation mask
 * whose address is passed in \ror. */
.macro COLUM_QUARTER_AVX_0 a0 a1 a2 b0 b1 b2 ror
vpaddd \a1, \a0, \a0
vpaddd \b1, \b0, \b0
vpxor \a0, \a2, \a2
vpxor \b0, \b2, \b2
vpshufb \ror, \a2, \a2
vpshufb \b2, \b2, \b2
.endm
/* Half quarter-round on two column pairs: a += b; c = rotate(c ^ a).
 * Non-byte rotations (12 or 7 bits) need shift right \psr / shift left \psl
 * and an OR; \cur1/\cur2 are scratch. */
.macro COLUM_QUARTER_AVX_1 a0 a1 a2 b0 b1 b2 cur1 cur2 psr psl
vpaddd \a1, \a0, \a0
vpaddd \b1, \b0, \b0
vpxor \a0, \a2, \a2
vpxor \b0, \b2, \b2
vpsrld \psr, \a2, \cur1
vpsrld \psr, \b2, \cur2
vpslld \psl, \a2, \a2
vpslld \psl, \b2, \b2
vpor \cur1, \a2, \a2
vpor \cur2, \b2, \b2
.endm
/*************************************************************************************
* AVX512 generic instruction set using macros.
*************************************************************************************/
/* Load the 64-byte state broadcast into all four 128-bit lanes of four zmm
 * registers (one row for four blocks per register). */
.macro LOAD_1024_STATE s0 s1 s2 s3 adr
vbroadcasti32x4 (\adr), \s0 // state[0-3]
vbroadcasti32x4 16(\adr), \s1 // state[4-7]
vbroadcasti32x4 32(\adr), \s2 // state[8-11]
vbroadcasti32x4 48(\adr), \s3 // state[12-15]
.endm
/* AVX512 variant of WRITEBACK_64_AVX2: XOR 64 bytes of keystream with the
 * input, store, and advance both pointers by 64 (clobbers s0-s3). */
.macro WRITEBACK_64_AVX512 inpos outpos s0 s1 s2 s3
vpxord (\inpos), \s0, \s0
vpxord 16(\inpos), \s1, \s1
vpxord 32(\inpos), \s2, \s2
vpxord 48(\inpos), \s3, \s3
vmovdqu32 \s0, (\outpos) // Write back output.
vmovdqu32 \s1, 16(\outpos)
vmovdqu32 \s2, 32(\outpos)
vmovdqu32 \s3, 48(\outpos)
add $64, \inpos
add $64, \outpos
.endm
/*
* %zmm0-15 STATE TO MATRIX
*/
.macro STATE_TO_MATRIX_Z_AVX512 in out0 out1 out2 out3
// Broadcast the four words of each 128-bit lane of \in across \out0-\out3,
// i.e. word k of every lane ends up replicated through \outk.
// {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0} .... {15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15}
vpshufd $0b00000000, \in, \out0
vpshufd $0b01010101, \in, \out1
vpshufd $0b10101010, \in, \out2
vpshufd $0b11111111, \in, \out3
.endm
/* AVX512 instruction set
* %zmm0-31(1024) QUARTER
*/
/* Half quarter-round on four columns at once: a += b; c = (c ^ a) <<< \ror.
 * AVX512's vprold does the rotation in a single instruction (the parameter
 * is a left-rotate amount, matching ChaCha's ROTL steps). */
.macro COLUM_QUARTER_AVX512_4 s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 ror
vpaddd \s4, \s0, \s0
vpaddd \s5, \s1, \s1
vpaddd \s6, \s2, \s2
vpaddd \s7, \s3, \s3
vpxord \s0, \s8, \s8
vpxord \s1, \s9, \s9
vpxord \s2, \s10, \s10
vpxord \s3, \s11, \s11
vprold \ror, \s8, \s8
vprold \ror, \s9, \s9
vprold \ror, \s10, \s10
vprold \ror, \s11, \s11
.endm
/* AVX512 instruction set
* %xmm0-15(256) %ymm0-15(512) %zmm0-31(1024) Loop
*/
/* One full ChaCha double round (column + diagonal) on a 16-register matrix;
 * no spilling needed because AVX512 has 32 vector registers. */
.macro CHACHA20_LOOP_AVX512 s00 s01 s02 s03 s04 s05 s06 s07 s08 s09 s10 s11 s12 s13 s14 s15
/* 0 = 0 + 4, 12 = (12 ^ 0) >>> 16 | 8 = 8 + 12, 4 = (4 ^ 8) >>> 12 |
 * 0 = 0 + 4, 12 = (12 ^ 0) >>> 8 | 8 = 8 + 12, 4 = (4 ^ 8) >>> 7
 * 1 = 1 + 5, 13 = (13 ^ 1) >>> 16 | 9 = 9 + 13, 5 = (5 ^ 9) >>> 12 |
 * 1 = 1 + 5, 13 = (13 ^ 1) >>> 8 | 9 = 9 + 13, 5 = (5 ^ 9) >>> 7
 * 2 = 2 + 6, 14 = (14 ^ 2) >>> 16 | 10 = 10 + 14, 6 = (6 ^ 10)>>> 12 |
 * 2 = 2 + 6, 14 = (14 ^ 2) >>> 8 | 10 = 10 + 14, 6 = (6 ^ 10)>>> 7
 * 3 = 3 + 7, 15 = (15 ^ 3) >>> 16 | 11 = 11 + 15, 7 = (7 ^ 11)>>> 12 |
 * 3 = 3 + 7, 15 = (15 ^ 3) >>> 8 | 11 = 11 + 15, 7 = (7 ^ 11)>>> 7
 */
COLUM_QUARTER_AVX512_4 \s00 \s01 \s02 \s03 \s04 \s05 \s06 \s07 \s12 \s13 \s14 \s15 $16
COLUM_QUARTER_AVX512_4 \s08 \s09 \s10 \s11 \s12 \s13 \s14 \s15 \s04 \s05 \s06 \s07 $12
COLUM_QUARTER_AVX512_4 \s00 \s01 \s02 \s03 \s04 \s05 \s06 \s07 \s12 \s13 \s14 \s15 $8
COLUM_QUARTER_AVX512_4 \s08 \s09 \s10 \s11 \s12 \s13 \s14 \s15 \s04 \s05 \s06 \s07 $7
/* 0 = 0 + 5, 15 = (15 ^ 0) >>> 16 | 10 = 10 + 15, 5 = (5 ^ 10) >>> 12 |
 * 0 = 0 + 5, 15 = (15 ^ 0) >>> 8 | 10 = 10 + 15, 5 = (5 ^ 10) >>> 7
 * 1 = 1 + 6, 12 = (12 ^ 1) >>> 16 | 11 = 11 + 12, 6 = (6 ^ 11) >>> 12 |
 * 1 = 1 + 6, 12 = (12 ^ 1) >>> 8 | 11 = 11 + 12, 6 = (6 ^ 11) >>> 7
 * 2 = 2 + 7, 13 = (13 ^ 2) >>> 16 | 8 = 8 + 13, 7 = (7 ^ 8)>>> 12 |
 * 2 = 2 + 7, 13 = (13 ^ 2) >>> 8 | 8 = 8 + 13, 7 = (7 ^ 8)>>> 7
 * 3 = 3 + 4, 14 = (14 ^ 3) >>> 16 | 9 = 9 + 14, 4 = (4 ^ 9)>>> 12 |
 * 3 = 3 + 4, 14 = (14 ^ 3) >>> 8 | 9 = 9 + 14, 4 = (4 ^ 9)>>> 7
 */
COLUM_QUARTER_AVX512_4 \s00 \s01 \s02 \s03 \s05 \s06 \s07 \s04 \s15 \s12 \s13 \s14 $16
COLUM_QUARTER_AVX512_4 \s10 \s11 \s08 \s09 \s15 \s12 \s13 \s14 \s05 \s06 \s07 \s04 $12
COLUM_QUARTER_AVX512_4 \s00 \s01 \s02 \s03 \s05 \s06 \s07 \s04 \s15 \s12 \s13 \s14 $8
COLUM_QUARTER_AVX512_4 \s10 \s11 \s08 \s09 \s15 \s12 \s13 \s14 \s05 \s06 \s07 \s04 $7
.endm
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chacha20_x8664_common.S | Unix Assembly | unknown | 13,328 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
#include "chacha20_x8664_common.S"
.text
/*
 * vpshufb byte-permutation masks: rotate every 32-bit lane left by 16
 * (g_ror16*) or by 8 (g_ror8*) bits, as used by the ChaCha quarter round.
 * NOTE(review): g_ror16_128/g_ror8_128 are byte-identical to
 * g_ror16_512/g_ror8_512 and could be shared.
 */
.align 64
g_ror16_128:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd, \
0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.size g_ror16_128, .-g_ror16_128
.align 64
g_ror8_128:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe, \
0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.size g_ror8_128, .-g_ror8_128
.align 64
g_ror16:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.size g_ror16, .-g_ror16
.align 64
g_ror8:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.size g_ror8, .-g_ror8
.align 64
g_ror16_512:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd, \
0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.size g_ror16_512, .-g_ror16_512
.align 64
g_ror8_512:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe, \
0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.size g_ror8_512, .-g_ror8_512
/* Per-block counter offsets added to state word 12 for a 4-block pass. */
.align 64
g_add4block:
.long 0, 1, 2, 3
.size g_add4block, .-g_add4block
/* Counter increment between consecutive 4-block iterations. */
.align 64
g_addsecond4block:
.long 4, 4, 4, 4
.size g_addsecond4block, .-g_addsecond4block
/* Per-block counter offsets for an 8-block pass. */
.align 64
g_add8block:
.long 0, 1, 2, 3, 4, 5, 6, 7
.size g_add8block, .-g_add8block
/* Counter increment between consecutive 8-block iterations. */
.align 64
g_addsecond8block:
.long 8, 8, 8, 8, 8, 8, 8, 8
.size g_addsecond8block, .-g_addsecond8block
/* Adds 1 to state word 12 (the counter) of the high 128-bit lane only -
 * used by the 2-blocks-per-ymm path. */
.align 64
g_addOne:
.long 0, 0, 0, 0, 1, 0, 0, 0
.size g_addOne, .-g_addOne
.set IN, %rsi
.set OUT, %rdx
/* QUARTERROUND for one state (whole-row form):
 * a += b; d = (d ^ a) <<< 16; c += d; b = (b ^ c) <<< 12;
 * a += b; d = (d ^ a) <<< 8;  c += d; b = (b ^ c) <<< 7.
 * \cur is scratch; \ror16/\ror8 are GPRs holding the addresses of the
 * vpshufb rotation masks. */
.macro CHACHA20_ROUND s0 s1 s2 s3 cur ror16 ror8
vpaddd \s1, \s0, \s0
vpxor \s0, \s3, \s3
vpshufb (\ror16), \s3, \s3 // d = (d ^ a) <<< 16, via byte shuffle
vpaddd \s3, \s2, \s2
vpxor \s2, \s1, \s1
vmovdqa \s1, \cur
vpsrld $20, \s1, \s1
vpslld $12, \cur, \cur
vpor \cur, \s1, \s1 // b = (b ^ c) <<< 12
vpaddd \s1, \s0, \s0
vpxor \s0, \s3, \s3
vpshufb (\ror8), \s3, \s3 // d = (d ^ a) <<< 8
vpaddd \s3, \s2, \s2
vpxor \s2, \s1, \s1
vmovdqa \s1, \cur
vpsrld $25, \s1, \s1
vpslld $7, \cur, \cur
vpor \cur, \s1, \s1 // b = (b ^ c) <<< 7
.endm
/* QUARTERROUND for two independent states (s0-s3 and s4-s7), interleaved
 * only at macro level - the same sequence as CHACHA20_ROUND is emitted
 * twice so the CPU can overlap the two dependency chains. */
.macro CHACHA20_2_ROUND s0 s1 s2 s3 cur s4 s5 s6 s7 cur1 ror16 ror8
vpaddd \s1, \s0, \s0
vpxor \s0, \s3, \s3
vpshufb (\ror16), \s3, \s3
vpaddd \s3, \s2, \s2
vpxor \s2, \s1, \s1
vmovdqa \s1, \cur
vpsrld $20, \s1, \s1
vpslld $12, \cur, \cur
vpor \cur, \s1, \s1
vpaddd \s1, \s0, \s0
vpxor \s0, \s3, \s3
vpshufb (\ror8), \s3, \s3
vpaddd \s3, \s2, \s2
vpxor \s2, \s1, \s1
vmovdqa \s1, \cur
vpsrld $25, \s1, \s1
vpslld $7, \cur, \cur
vpor \cur, \s1, \s1
// second state
vpaddd \s5, \s4, \s4
vpxor \s4, \s7, \s7
vpshufb (\ror16), \s7, \s7
vpaddd \s7, \s6, \s6
vpxor \s6, \s5, \s5
vmovdqa \s5, \cur1
vpsrld $20, \s5, \s5
vpslld $12, \cur1, \cur1
vpor \cur1, \s5, \s5
vpaddd \s5, \s4, \s4
vpxor \s4, \s7, \s7
vpshufb (\ror8), \s7, \s7
vpaddd \s7, \s6, \s6
vpxor \s6, \s5, \s5
vmovdqa \s5, \cur1
vpsrld $25, \s5, \s5
vpslld $7, \cur1, \cur1
vpor \cur1, \s5, \s5
.endm
/* Final ChaCha feed-forward: add the saved initial matrix (spilled at
 * %rsp, stride \PER bytes per word) back into the working registers. */
.macro LASTADD_MATRIX S0 S1 S2 S3 S4 S5 S6 S7 S8 S9 S10 S11 S12 S13 S14 S15 PER
vpaddd (%rsp), \S0, \S0
vpaddd 1*\PER(%rsp), \S1, \S1
vpaddd 2*\PER(%rsp), \S2, \S2
vpaddd 3*\PER(%rsp), \S3, \S3
vpaddd 4*\PER(%rsp), \S4, \S4
vpaddd 5*\PER(%rsp), \S5, \S5
vpaddd 6*\PER(%rsp), \S6, \S6
vpaddd 7*\PER(%rsp), \S7, \S7
vpaddd 8*\PER(%rsp), \S8, \S8
vpaddd 9*\PER(%rsp), \S9, \S9
vpaddd 10*\PER(%rsp), \S10, \S10
vpaddd 11*\PER(%rsp), \S11, \S11
vpaddd 12*\PER(%rsp), \S12, \S12
vpaddd 13*\PER(%rsp), \S13, \S13
vpaddd 14*\PER(%rsp), \S14, \S14
vpaddd 15*\PER(%rsp), \S15, \S15
.endm
/* Write output for the LOW 128-bit lanes of 8 ymm keystream registers:
 * pair them with vperm2i128 $0x20, XOR against 128 input bytes, store.
 * \out0-\out3 are scratch; the source registers are preserved so the
 * high lanes can be written later by WRITE_BACK_512_R. */
.macro WRITE_BACK_512_L inpos outpos s0 s1 s2 s3 s4 s5 s6 s7 out0 out1 out2 out3
/* {A0 B0 C0 D0 E0 F0 G0 H0} {A1 B1 C1 D1 E1 F1 G1 H1} => {A0 B0 C0 D0 A1 B1 C1 D1} */
vperm2i128 $0x20, \s1, \s0, \out0
vpxor (\inpos), \out0, \out0
vmovdqu \out0, (\outpos) // write back output
vperm2i128 $0x20, \s3, \s2, \out1
vpxor 32(\inpos), \out1, \out1
vmovdqu \out1, 32(\outpos)
vperm2i128 $0x20, \s5, \s4, \out2
vpxor 64(\inpos), \out2, \out2 // write back output
vmovdqu \out2, 64(\outpos)
vperm2i128 $0x20, \s7, \s6, \out3
vpxor 96(\inpos), \out3, \out3
vmovdqu \out3, 96(\outpos)
.endm
/* Write output for the HIGH 128-bit lanes of 8 ymm keystream registers
 * (vperm2i128 $0x31). Unlike the _L variant this clobbers s1/s3/s5/s7,
 * so it must run after the corresponding WRITE_BACK_512_L. */
.macro WRITE_BACK_512_R inpos outpos s0 s1 s2 s3 s4 s5 s6 s7
/* {A0 B0 C0 D0 E0 F0 G0 H0} {A1 B1 C1 D1 E1 F1 G1 H1} => {E0 F0 G0 H0 E1 F1 G1 H1} */
vperm2i128 $0x31, \s1, \s0, \s1
vpxor (\inpos), \s1, \s1
vmovdqu \s1, (\outpos) // write back output
vperm2i128 $0x31, \s3, \s2, \s3
vpxor 32(\inpos), \s3, \s3
vmovdqu \s3, 32(\outpos)
vperm2i128 $0x31, \s5, \s4, \s5
vpxor 64(\inpos), \s5, \s5
vmovdqu \s5, 64(\outpos) // write back output
vperm2i128 $0x31, \s7, \s6, \s7
vpxor 96(\inpos), \s7, \s7
vmovdqu \s7, 96(\outpos)
.endm
/*
* Processing 64 bytes: 4 xmm registers
* xmm0 ~ xmm3:
* xmm0 {0, 1, 2, 3}
* xmm1 {4, 5, 6, 7}
* xmm2 {8, 9, 10, 11}
* xmm3 {12, 13, 14, 15}
*
* Processing 128 bytes: 8 xmm registers
* xmm0 ~ xmm8:
* xmm0 {0, 1, 2, 3} xmm5 {0, 1, 2, 3}
* xmm1 {4, 5, 6, 7} xmm6 {4, 5, 6, 7}
* xmm2 {8, 9, 10, 11} xmm7 {8, 9, 10, 11}
* xmm3 {12, 13, 14, 15} xmm8 {12, 13, 14, 15}
*
* Processing 256 bytes: 16 xmm registers
* xmm0 ~ xmm15:
* xmm0 {0, 0, 0, 0}
 * xmm1 {1, 1, 1, 1}
 * xmm2 {2, 2, 2, 2}
 * xmm3 {3, 3, 3, 3}
* ...
* xmm15 {15, 15, 15, 15}
*
 * Processing 512 bytes: 16 ymm registers
 * ymm0 ~ ymm15:
 * ymm0 {0, 0, 0, 0}
 * ymm1 {1, 1, 1, 1}
 * ymm2 {2, 2, 2, 2}
 * ymm3 {3, 3, 3, 3}
* ...
* ymm15 {15, 15, 15, 15}
*
*/
/*
* @Interconnection with the C interface:void CHACHA20_Update(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
* @brief chacha20 algorithm
* @param ctx [IN] Algorithm context, which is set by the C interface and transferred.
* @param in [IN] Data to be encrypted
* @param out [OUT] Data after encryption
* @param len [IN] Encrypted length
* esp cannot use 15 available ctx in out len
* 16 registers are needed in one cycle, then
* {0, 1, 4, 5, 8, 9, 12, 13}
* {2, 3, 6, 7, 10, 11, 14, 15}
*/
.globl CHACHA20_Update
.type CHACHA20_Update,%function
.align 64
CHACHA20_Update:
.cfi_startproc
mov 48(%rdi), %r11d // load 32-bit block counter from ctx offset 48
mov %rsp, %rax // save caller rsp; restored before ret
subq $1024,%rsp
andq $-512,%rsp // 512-byte-aligned scratch area for matrix spills
.Lchacha20_start:
cmp $512, %rcx
jae .Lchacha20_512_start
cmp $256, %rcx
jae .Lchacha20_256_start
cmp $128, %rcx
jae .Lchacha20_128_start
cmp $64, %rcx
jae .Lchacha20_64_start
jmp .Lchacha20_end // NOTE(review): a tail of < 64 bytes is silently skipped - presumably len is always a multiple of 64; confirm with the C caller.
.Lchacha20_64_start:
LOAD_STATE %xmm0, %xmm1, %xmm2, %xmm3, %rdi
vmovdqa %xmm0, %xmm10 // keep a copy of the initial state
vmovdqa %xmm1, %xmm11 // for the final feed-forward addition
vmovdqa %xmm2, %xmm12
vmovdqa %xmm3, %xmm13
leaq g_ror16(%rip), %r9
leaq g_ror8(%rip), %r10
mov $10, %r8 // 10 double rounds = 20 ChaCha rounds
.Lchacha20_64_loop:
/* 0 = 0 + 4, 12 = (12 ^ 0) >>> 16 | 8 = 8 + 12, 4 = (4 ^ 8) >>> 12 |
 * 0 = 0 + 4, 12 = (12 ^ 0) >>> 8 | 8 = 8 + 12, 4 = (4 ^ 8) >>> 7
 * 1 = 1 + 5, 13 = (13 ^ 1) >>> 16 | 9 = 9 + 13, 5 = (5 ^ 9) >>> 12 |
 * 1 = 1 + 5, 13 = (13 ^ 1) >>> 8 | 9 = 9 + 13, 5 = (5 ^ 9) >>> 7
 * 2 = 2 + 6, 14 = (14 ^ 2) >>> 16 | 10 = 10 + 14, 6 = (6 ^ 10)>>> 12 |
 * 2 = 2 + 6, 14 = (14 ^ 2) >>> 8 | 10 = 10 + 14, 6 = (6 ^ 10)>>> 7
 * 3 = 3 + 7, 15 = (15 ^ 3) >>> 16 | 11 = 11 + 15, 7 = (7 ^ 11)>>> 12 |
 * 3 = 3 + 7 ,15 = (15 ^ 3) >>> 8 | 11 = 11 + 15, 7 = (7 ^ 11)>>> 7
 */
CHACHA20_ROUND %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %r9, %r10
vpshufd $78, %xmm2, %xmm2 // {8 9 10 11} ==> {10 11 8 9} 01 00 11 10
vpshufd $57, %xmm1, %xmm1 // {4 5 6 7} ==> {5 6 7 4} 00 11 10 01
vpshufd $147, %xmm3, %xmm3 // {12 13 14 15} ==> {15 12 13 14} 10 01 00 11
/* 0 = 0 + 5 , 15 = (15 ^ 0) >>> 16 | 10 = 10 + 15, 5 = (5 ^ 10) >>> 12 |
 * 0 = 0 + 5, 15 = (15 ^ 0) >>> 8 | 10 = 10 + 15, 5 = (5 ^ 10) >>> 7
 * 1 = 1 + 6 , 12 = (12 ^ 1) >>> 16 | 11 = 11 + 12, 6 = (6 ^ 11) >>> 12 |
 * 1 = 1 + 6, 12 = (12 ^ 1) >>> 8 | 11 = 11 + 12, 6 = (6 ^ 11) >>> 7
 * 2 = 2 + 7 , 13 = (13 ^ 2) >>> 16 | 8 = 8 + 13, 7 = (7 ^ 8)>>> 12 |
 * 2 = 2 + 7, 13 = (13 ^ 2) >>> 8 | 8 = 8 + 13, 7 = (7 ^ 8)>>> 7
 * 3 = 3 + 4 , 14 = (14 ^ 3) >>> 16 | 9 = 9 + 14, 4 = (4 ^ 9)>>> 12 |
 * 3 = 3 + 4, 14 = (14 ^ 3) >>> 8 | 9 = 9 + 14, 4 = (4 ^ 9)>>> 7
 */
CHACHA20_ROUND %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %r9, %r10
vpshufd $78, %xmm2, %xmm2 // {10 11 8 9} ==> {8 9 10 11} 01 00 11 10
vpshufd $147, %xmm1, %xmm1 // {5 6 7 4} ==> {4 5 6 7} 00 11 10 01
vpshufd $57, %xmm3, %xmm3 // {15 12 13 14} ==> {12 13 14 15} 10 01 00 11
decq %r8
jnz .Lchacha20_64_loop
vpaddd %xmm10, %xmm0, %xmm0 // feed-forward: add initial state back
vpaddd %xmm11, %xmm1, %xmm1
vpaddd %xmm12, %xmm2, %xmm2
vpaddd %xmm13, %xmm3, %xmm3
add $1, %r11d // one 64-byte block consumed
vpxor 0(IN), %xmm0, %xmm4
vpxor 16(IN), %xmm1, %xmm5
vpxor 32(IN), %xmm2, %xmm6
vpxor 48(IN), %xmm3, %xmm7
vmovdqu %xmm4, 0(OUT)
vmovdqu %xmm5, 16(OUT)
vmovdqu %xmm6, 32(OUT)
vmovdqu %xmm7, 48(OUT)
add $64, IN
add $64, OUT
mov %r11d, 48(%rdi) // store updated counter back into ctx
jmp .Lchacha20_end
.Lchacha20_128_start:
vbroadcasti128 (%rdi), %ymm0 // {0 1 2 3 0 1 2 3}
vbroadcasti128 16(%rdi), %ymm1 // {4 5 6 7 4 5 6 7}
vbroadcasti128 32(%rdi), %ymm2 // {8 9 10 11 8 9 10 11}
vbroadcasti128 48(%rdi), %ymm3 // {12 13 14 15 12 13 14 15}
vpaddd g_addOne(%rip), %ymm3, %ymm3 // counter of the high lane (2nd block) += 1
vmovdqa %ymm0, %ymm12 // keep initial state for feed-forward
vmovdqa %ymm1, %ymm13
vmovdqa %ymm2, %ymm14
vmovdqa %ymm3, %ymm15
leaq g_ror16_128(%rip), %r9
leaq g_ror8_128(%rip), %r10
mov $10, %r8 // 10 double rounds
.Lchacha20_128_loop:
CHACHA20_ROUND %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %r9, %r10
vpshufd $78, %ymm2, %ymm2 // {8 9 10 11} ==> {10 11 8 9} 01 00 11 10
vpshufd $57, %ymm1, %ymm1 // {4 5 6 7} ==> {5 6 7 4} 00 11 10 01
vpshufd $147, %ymm3, %ymm3 // {12 13 14 15} ==> {15 12 13 14} 10 01 00 11
CHACHA20_ROUND %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %r9, %r10
vpshufd $78, %ymm2, %ymm2 // {10 11 8 9} ==> {8 9 10 11} (undo diagonalization)
vpshufd $147, %ymm1, %ymm1 // {5 6 7 4} ==> {4 5 6 7}
vpshufd $57, %ymm3, %ymm3 // {15 12 13 14} ==> {12 13 14 15}
decq %r8
jnz .Lchacha20_128_loop
vpaddd %ymm12, %ymm0, %ymm0
vpaddd %ymm13, %ymm1, %ymm1
vpaddd %ymm14, %ymm2, %ymm2
vpaddd %ymm15, %ymm3, %ymm3
vextracti128 $1, %ymm0, %xmm4 // ymm0 = {xmm0 | xmm4}: split per-block keystream
vextracti128 $1, %ymm1, %xmm5 // ymm1 = {xmm1 | xmm5}
vextracti128 $1, %ymm2, %xmm6 // ymm2 = {xmm2 | xmm6}
vextracti128 $1, %ymm3, %xmm7 // ymm3 = {xmm3 | xmm7}
WRITEBACK_64_AVX2 IN, OUT, %xmm0, %xmm1, %xmm2, %xmm3
add $2, %r11d // two 64-byte blocks consumed
WRITEBACK_64_AVX2 IN, OUT, %xmm4, %xmm5, %xmm6, %xmm7
mov %r11d, 48(%rdi)
sub $128, %rcx
jz .Lchacha20_end
jmp .Lchacha20_start
.Lchacha20_256_start:
LOAD_STATE %xmm0, %xmm1, %xmm2, %xmm3, %rdi
STATE_TO_MATRIX %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, \
%xmm11, %xmm12, %xmm13, %xmm14, %xmm15, 0, 16, g_add4block(%rip)
/* move xmm8~11 into stack for CHACHA20_LOOP encryption */
vmovdqa %xmm8, 256(%rsp)
vmovdqa %xmm9, 256+16(%rsp)
vmovdqa %xmm10, 256+32(%rsp)
vmovdqa %xmm11, 256+48(%rsp)
leaq g_ror16(%rip), %r9
leaq g_ror8(%rip), %r10
mov $10, %r8
.Lchacha20_256_loop:
CHACHA20_LOOP %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10 \
%xmm11, %xmm12, %xmm13, %xmm14, %xmm15, 256, 16, %rsp, %r9, %r10
decq %r8
jnz .Lchacha20_256_loop
/* xmm0~15: encrypt matrix 0 ~ 15*/
vmovdqa 256+32(%rsp), %xmm10 // rsp32: encrypt matrix xmm10
vmovdqa 256+48(%rsp), %xmm11
LASTADD_MATRIX %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10 \
%xmm11, %xmm12, %xmm13, %xmm14, %xmm15, 16
/* store xmm9, 10, 13, 14 in stack */
vmovdqa %xmm9, 256(%rsp) // rsp 0: encrypt matrix xmm9
vmovdqa %xmm10, 256+32(%rsp) // rsp32: encrypt matrix xmm10
vmovdqa %xmm13, 256+16(%rsp) // rsp16: encrypt matrix xmm13
vmovdqa %xmm14, 256+48(%rsp) // rsp48: encrypt matrix xmm14
MATRIX_TO_STATE %xmm0, %xmm1, %xmm2, %xmm3, %xmm9, %xmm10 // set state 0, 3, 9, 10
MATRIX_TO_STATE %xmm4, %xmm5, %xmm6, %xmm7, %xmm13, %xmm14 // set state 4, 7, 13, 14
vmovdqa 256(%rsp), %xmm5
vmovdqa 256+32(%rsp), %xmm6
vmovdqa %xmm9, 256(%rsp)
vmovdqa %xmm10, 256+32(%rsp)
MATRIX_TO_STATE %xmm8, %xmm5, %xmm6, %xmm11, %xmm1, %xmm2 // set state 8, 11, 1, 2
vmovdqa 256+16(%rsp), %xmm9
vmovdqa 256+48(%rsp), %xmm10
vmovdqa %xmm13, 256+16(%rsp)
vmovdqa %xmm14, 256+48(%rsp)
MATRIX_TO_STATE %xmm12, %xmm9, %xmm10, %xmm15, %xmm5, %xmm6 // set state 12, 15, 5, 6
vmovdqa 256(%rsp), %xmm9 // rsp 0: state 9
vmovdqa 256+32(%rsp), %xmm10 // rsp32: state 10
vmovdqa 256+16(%rsp), %xmm13 // rsp16: state 13
vmovdqa 256+48(%rsp), %xmm14 // rsp48: state 14
/* finish state calculation, now write result to output */
WRITEBACK_64_AVX2 IN, OUT, %xmm0, %xmm4, %xmm8, %xmm12
WRITEBACK_64_AVX2 IN, OUT, %xmm3, %xmm7, %xmm11, %xmm15
WRITEBACK_64_AVX2 IN, OUT, %xmm9, %xmm13, %xmm1, %xmm5
WRITEBACK_64_AVX2 IN, OUT, %xmm10, %xmm14, %xmm2, %xmm6
add $4, %r11d // four 64-byte blocks consumed
sub $256, %rcx
mov %r11d, 48(%rdi)
// NOTE(review): on entry here len was in [256, 512), so rcx < 256 after the
// sub and this compare can never set ZF (dead branch). The 128/512 paths
// instead test the sub's ZF directly ('mov' preserves flags) - confirm
// intent; 'jz .Lchacha20_end' without the cmp would match them.
cmp $256, %rcx
jz .Lchacha20_end
jmp .Lchacha20_start
.Lchacha20_512_start:
LOAD_512_STATE %ymm0 %ymm1 %ymm2 %ymm3 %rdi
STATE_TO_MATRIX %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, \
%ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, 0, 32, g_add8block(%rip)
jmp .Lchacha20_512_run
.Lchacha20_512_start_cont:
// Subsequent 512-byte iterations reload the spilled matrix and advance the
// counters by 8 instead of rebuilding the matrix from ctx.
LOAD_MATRIX %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, \
%ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, 0, 32, g_addsecond8block(%rip)
.Lchacha20_512_run:
/* move ymm8~11 into stack for CHACHA20_LOOP encryption */
vmovdqa %ymm8, 512(%rsp)
vmovdqa %ymm9, 512+32(%rsp)
vmovdqa %ymm10, 512+64(%rsp)
vmovdqa %ymm11, 512+96(%rsp)
leaq g_ror16_512(%rip), %r9
leaq g_ror8_512(%rip), %r10
mov $10, %r8
.align 32
.Lchacha20_512_loop:
CHACHA20_LOOP %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10 \
%ymm11, %ymm12, %ymm13, %ymm14, %ymm15, 512, 32, %rsp, %r9, %r10
decq %r8
jnz .Lchacha20_512_loop
/* ymm0~15: encrypt matrix 0 ~ 15*/
vmovdqa 512+64(%rsp), %ymm10 // rsp64: encrypt matrix ymm10
vmovdqu 512+96(%rsp), %ymm11
LASTADD_MATRIX %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10 \
%ymm11, %ymm12, %ymm13, %ymm14, %ymm15, 32
/* store matrix ymm9, 10, 13, 14 in stack */
vmovdqa %ymm9, 512(%rsp) // rsp 0: encrypt matrix ymm9
vmovdqu %ymm10, 512+32(%rsp) // rsp32: encrypt matrix ymm10
vmovdqa %ymm13, 512+64(%rsp) // rsp64: encrypt matrix ymm13
vmovdqu %ymm14, 512+96(%rsp) // rsp96: encrypt matrix ymm14
MATRIX_TO_STATE %ymm0, %ymm1, %ymm2, %ymm3, %ymm9, %ymm10 // set state 0, 3, 9, 10
MATRIX_TO_STATE %ymm4, %ymm5, %ymm6, %ymm7, %ymm13, %ymm14 // set state 4, 7, 13, 14
vmovdqu 512(%rsp), %ymm5
vmovdqa 512+32(%rsp), %ymm6
vmovdqu %ymm9, 512(%rsp)
vmovdqa %ymm10, 512+32(%rsp)
MATRIX_TO_STATE %ymm8, %ymm5, %ymm6, %ymm11, %ymm1, %ymm2 // set state 8, 11, 1, 2
vmovdqa 512+64(%rsp), %ymm9
vmovdqu 512+96(%rsp), %ymm10
vmovdqa %ymm13, 512+64(%rsp)
vmovdqu %ymm14, 512+96(%rsp)
MATRIX_TO_STATE %ymm12, %ymm9, %ymm10, %ymm15, %ymm5, %ymm6 // set state 12, 15, 5, 6
/*
 * {A0 A1 A2 A3 E0 E1 E2 E3}
 * {B0 B1 B2 B3 F0 F1 F2 F3}
 * {C0 C1 C2 C3 G0 G1 G2 G3}
 * {D0 D1 D2 D3 H0 H1 H2 H3}
 * ...
 * =>
 * {A0 A1 A2 A3 B0 B1 B2 B3}
 * {C0 C1 C2 C3 D0 D1 D2 D3}
 * ....
 */
/* left half of ymm registers */
WRITE_BACK_512_L IN, OUT, %ymm0, %ymm4, %ymm8, %ymm12, %ymm3, %ymm7, %ymm11, %ymm15, %ymm9, %ymm10, %ymm13, %ymm14
add $256, IN
add $256, OUT
/* right half of ymm registers */
WRITE_BACK_512_R IN, OUT, %ymm0, %ymm4, %ymm8, %ymm12, %ymm3, %ymm7, %ymm11, %ymm15
sub $128, IN
sub $128, OUT
vmovdqa 512(%rsp), %ymm9
vmovdqu 512+32(%rsp), %ymm10
vmovdqa 512+64(%rsp), %ymm13
vmovdqu 512+96(%rsp), %ymm14
/* second left half of ymm registers */
WRITE_BACK_512_L IN, OUT, %ymm9, %ymm13, %ymm1, %ymm5, %ymm10, %ymm14, %ymm2, %ymm6, %ymm0, %ymm4, %ymm8, %ymm12
add $256, IN
add $256, OUT
/* second right half of ymm registers */
WRITE_BACK_512_R IN, OUT, %ymm9, %ymm13, %ymm1, %ymm5, %ymm10, %ymm14, %ymm2, %ymm6
add $128, IN
add $128, OUT
add $8, %r11d // eight 64-byte blocks consumed
sub $512, %rcx
mov %r11d, 48(%rdi) // mov preserves flags; ZF below comes from the sub above
jz .Lchacha20_end
cmp $512, %rcx
jae .Lchacha20_512_start_cont
jmp .Lchacha20_start
.Lchacha20_end:
/* clear sensitive info in stack */
vpxor %ymm0, %ymm0, %ymm0
xor %r11d, %r11d
vmovdqa %ymm0, (%rsp)
vmovdqa %ymm0, 32(%rsp)
vmovdqa %ymm0, 64(%rsp)
vmovdqa %ymm0, 96(%rsp)
vmovdqa %ymm0, 128(%rsp)
vmovdqa %ymm0, 160(%rsp)
vmovdqa %ymm0, 192(%rsp)
vmovdqa %ymm0, 224(%rsp)
vmovdqa %ymm0, 256(%rsp)
vmovdqa %ymm0, 288(%rsp)
vmovdqa %ymm0, 320(%rsp)
vmovdqa %ymm0, 352(%rsp)
vmovdqa %ymm0, 384(%rsp)
vmovdqa %ymm0, 416(%rsp)
vmovdqa %ymm0, 448(%rsp)
vmovdqa %ymm0, 480(%rsp)
vmovdqa %ymm0, 512(%rsp)
vmovdqa %ymm0, 512+32(%rsp)
vmovdqa %ymm0, 512+64(%rsp)
vmovdqa %ymm0, 512+96(%rsp)
mov %rax, %rsp // restore caller stack pointer
// NOTE(review): 'ret' sits after .cfi_endproc, so it is outside the CFI
// region seen by unwinders; consider moving .cfi_endproc below it. Also,
// only ymm0 is cleared - other ymm registers still hold keystream here.
.cfi_endproc
ret
.size CHACHA20_Update,.-CHACHA20_Update
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chacha20block_x8664_avx2.S | Unix Assembly | unknown | 20,876 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
#include "chacha20_x8664_common.S"
.text
/* Byte-shuffle masks implementing 16-bit / 8-bit word rotates via pshufb
 * (kept for SSSE3-style rotate paths; the AVX-512 code below rotates with
 * vprold — presumably these are consumed by chacha20_x8664_common.S, verify). */
.align 64
g_ror16:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.size g_ror16, .-g_ror16
.align 64
g_ror8:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.size g_ror8, .-g_ror8
/* 256-bit (two-lane) versions of the same rotate masks. */
.align 64
g_ror16_128:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd, \
0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.size g_ror16_128, .-g_ror16_128
.align 64
g_ror8_128:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe, \
0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.size g_ror8_128, .-g_ror8_128
/* Per-lane counter increments: adds 0/1/2/3 to the counter word (state[12])
 * of each replicated 128-bit state lane. */
.align 64
g_addOne:
.long 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0
.size g_addOne, .-g_addOne
/* Block-counter spreads/steps for the 4-, 8- and 16-block parallel kernels. */
.align 64
g_add4block:
.long 0, 1, 2, 3
.size g_add4block, .-g_add4block
.align 64
g_addsecond4block:
.long 4, 4, 4, 4
.size g_addsecond4block, .-g_addsecond4block
.align 64
g_add8block:
.long 0, 1, 2, 3, 4, 5, 6, 7
.size g_add8block, .-g_add8block
.align 64
g_addsecond8block:
.long 8, 8, 8, 8, 8, 8, 8, 8
.size g_addsecond8block, .-g_addsecond8block
.align 64
g_add16block:
.long 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
.size g_add16block, .-g_add16block
.align 64
g_addsecond16block:
.long 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
.size g_addsecond16block, .-g_addsecond16block
/* Register aliases for the input/output byte pointers (SysV args 2 and 3). */
.set IN, %rsi
.set OUT, %rdx
/*
* Processing 64 bytes: 4 x registers, number of instructions in a single loop: 21*2 = 42
* xmm0 ~ xmm3:
* xmm0 {0, 1, 2, 3}
* xmm1 {4, 5, 6, 7}
* xmm2 {8, 9, 10, 11}
* xmm3 {12, 13, 14, 15}
*
* Processing 128-256 bytes: 4 x registers, number of instructions in a single loop:30
* ymm0 ~ ymm3:
* ymm0 {0, 1, 2, 3, 0, 1, 2, 3 }
* ymm1 {4, 5, 6, 7, 4, 5, 6, 7 }
* ymm2 {8, 9, 10, 11, 8, 9, 10, 11}
* ymm3 {12, 13, 14, 15, 12, 13, 14, 15}
*
* Processing 512 bytes: y registers 0-15, 128 stack space and y registers 16-31,number of instructions
*in a single loop:12*8 = 96
* Processing 1024 bytes: z registers 0-15, 256 stack space and z registers 16-31, number of instructions
* in a single loop:12*8 = 96
* ymm0 ~ ymm15:
* ymm0 {0, 0, 0, 0, 0, 0, 0, 0}
* ymm1 {1, 1, 1, 1, 1, 1, 1, 1}
* ymm2 {2, 2, 2, 2, 2, 2, 2, 2}
* ymm3 {3, 3, 3, 3, 3, 3, 3, 3}
* ......
* ymm15 {15, 15, 15, 15, 15, 15, 15, 15}
*
* zmm0 ~ zmm31:
* zmm0 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
* zmm1 {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
* zmm2 {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
* zmm3 {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}
* ...
* zmm15 {15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}
*/
/* One double quarter-round (RFC 7539, 2.1) on four row registers:
 * a += b; d ^= a; d <<<= 16; c += d; b ^= c; b <<<= 12;
 * a += b; d ^= a; d <<<= 8;  c += d; b ^= c; b <<<= 7;
 * Rotates use the AVX-512 vprold instruction instead of shift/or pairs. */
.macro CHACHA20_ROUND s0 s1 s2 s3
vpaddd \s1, \s0, \s0
vpxord \s0, \s3, \s3
vprold $16, \s3, \s3
vpaddd \s3, \s2, \s2
vpxord \s2, \s1, \s1
vprold $12, \s1, \s1
vpaddd \s1, \s0, \s0
vpxord \s0, \s3, \s3
vprold $8, \s3, \s3
vpaddd \s3, \s2, \s2
vpxord \s2, \s1, \s1
vprold $7, \s1, \s1
.endm
/* convert y registers and write back:
 * gather 128-bit lane \pos of \s0 and \s1 into ymm16, XOR with 32 input
 * bytes, store 32 output bytes, then advance both pointers (clobbers
 * ymm16/xmm17). */
.macro CONVERT_Y s0 s1 pos inpos outpos
/* ymm16 => {xmm16, xmm17} */
vextracti32x4 \pos, \s0, %xmm16
vextracti32x4 \pos, \s1, %xmm17
vinserti32x4 $1, %xmm17, %ymm16, %ymm16
vpxord (IN), %ymm16, %ymm16
vmovdqu64 %ymm16, (OUT)
add $32, \inpos
add $32, \outpos
.endm
/* convert z registers and write back:
 * gather 128-bit lane \pos of \s0..\s3 into zmm16, XOR with 64 input
 * bytes, store 64 output bytes, then advance both pointers (clobbers
 * zmm16/xmm17..19). */
.macro CONVERT_Z s0 s1 s2 s3 pos inpos outpos
/* zmm16 => {xmm16, xmm17, xmm18, xmm19} */
vextracti64x2 \pos, \s0, %xmm16
vextracti64x2 \pos, \s1, %xmm17
vextracti64x2 \pos, \s2, %xmm18
vextracti64x2 \pos, \s3, %xmm19
vinserti64x2 $1, %xmm17, %zmm16, %zmm16
vinserti64x2 $2, %xmm18, %zmm16, %zmm16
vinserti64x2 $3, %xmm19, %zmm16, %zmm16
vpxord (IN), %zmm16, %zmm16
vmovdqu64 %zmm16, (OUT)
add $64, \inpos
add $64, \outpos
.endm
/**
* @Interconnection with the C interface:void CHACHA20_Update(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
* @brief chacha20 algorithm
* @param ctx [IN] Algorithm context, which is set by the C interface and transferred.
* @param in [IN] Data to be encrypted
* @param out [OUT] Data after encryption
* @param len [IN] Encrypted length
* esp cannot use 15 available ctx in out len
* 16 registers are needed in one cycle, then
* {0, 1, 4, 5, 8, 9, 12, 13}
* {2, 3, 6, 7, 10, 11, 14, 15}
**/
.globl CHACHA20_Update
.type CHACHA20_Update,%function
.align 64
CHACHA20_Update:
.cfi_startproc
/* r11d <- block counter (ctx->state[12], byte offset 48 in the context). */
mov 48(%rdi), %r11d
/* Save the caller's rsp in r9 and carve out a 1 KiB-aligned scratch area
 * used by the 1024-byte kernel to spill the original matrix rows. */
mov %rsp, %r9
subq $2048,%rsp
andq $-1024,%rsp
/* Dispatch on the remaining length in rcx to the widest applicable kernel. */
.Lchacha20_start:
cmp $1024, %rcx
jae .Lchacha20_1024_start
cmp $512, %rcx
jae .Lchacha20_512_start
cmp $256, %rcx
jae .Lchacha20_256_start
cmp $128, %rcx
jae .Lchacha20_128_start
cmp $64, %rcx
jae .Lchacha20_64_start
jmp .Lchacha20_end
/* Single-block (64-byte) kernel: one state in xmm0-3, original kept in
 * xmm10-13 for the final feed-forward addition. */
.Lchacha20_64_start:
LOAD_STATE %xmm0, %xmm1, %xmm2, %xmm3, %rdi
vmovdqa %xmm0, %xmm10
vmovdqa %xmm1, %xmm11
vmovdqa %xmm2, %xmm12
vmovdqa %xmm3, %xmm13
mov $10, %r8
.Lchacha20_64_loop:
/* Column rounds: quarter-rounds on (0,4,8,12) (1,5,9,13) (2,6,10,14) (3,7,11,15). */
CHACHA20_ROUND %xmm0, %xmm1, %xmm2, %xmm3
vpshufd $78, %xmm2, %xmm2 // {8 9 10 11} ==> {10 11 8 9} 01 00 11 10
vpshufd $57, %xmm1, %xmm1 // {4 5 6 7} ==> {5 6 7 4} 00 11 10 01
vpshufd $147, %xmm3, %xmm3 // {12 13 14 15} ==> {15 12 13 14} 10 01 00 11
/* Diagonal rounds: quarter-rounds on (0,5,10,15) (1,6,11,12) (2,7,8,13) (3,4,9,14),
 * realized by the lane rotations above; the shuffles below restore row order. */
CHACHA20_ROUND %xmm0, %xmm1, %xmm2, %xmm3
vpshufd $78, %xmm2, %xmm2 // {10 11 8 9} ==> {8 9 10 11} 01 00 11 10
vpshufd $147, %xmm1, %xmm1 // {5 6 7 4} ==> {4 5 6 7} 00 11 10 01
vpshufd $57, %xmm3, %xmm3 // {15 12 13 14} ==> {12 13 14 15} 10 01 00 11
decq %r8
jnz .Lchacha20_64_loop
/* Feed-forward: add the original state, bump the counter, XOR/write 64 bytes. */
vpaddd %xmm10, %xmm0, %xmm0
vpaddd %xmm11, %xmm1, %xmm1
vpaddd %xmm12, %xmm2, %xmm2
vpaddd %xmm13, %xmm3, %xmm3
add $1, %r11d
WRITEBACK_64_AVX512 IN, OUT, %xmm0, %xmm1, %xmm2, %xmm3
mov %r11d, 48(%rdi)
jmp .Lchacha20_end
/* Two-block (128-byte) kernel: the 128-bit state is replicated into both
 * ymm lanes; g_addOne gives the second lane counter+1. Originals in ymm16-19. */
.Lchacha20_128_start:
vbroadcasti128 (%rdi), %ymm0 // {0 1 2 3 0 1 2 3}
vbroadcasti128 16(%rdi), %ymm1 // {4 5 6 7 4 5 6 7}
vbroadcasti128 32(%rdi), %ymm2 // {8 9 10 11 8 9 10 11}
vbroadcasti128 48(%rdi), %ymm3 // {12 13 14 15 12 13 14 15}
vpaddd g_addOne(%rip), %ymm3, %ymm3
vmovdqa32 %ymm0, %ymm16
vmovdqa32 %ymm1, %ymm17
vmovdqa32 %ymm2, %ymm18
vmovdqa32 %ymm3, %ymm19
mov $10, %r8
.Lchacha20_128_loop:
/* Column rounds, then rotate lanes into diagonal position. */
CHACHA20_ROUND %ymm0, %ymm1, %ymm2, %ymm3
vpshufd $78, %ymm2, %ymm2 // {8 9 10 11} ==> {10 11 8 9} 01 00 11 10
vpshufd $57, %ymm1, %ymm1 // {4 5 6 7} ==> {5 6 7 4} 00 11 10 01
vpshufd $147, %ymm3, %ymm3 // {12 13 14 15} ==> {15 12 13 14} 10 01 00 11
/* Diagonal rounds, then undo the lane rotation. */
CHACHA20_ROUND %ymm0, %ymm1, %ymm2, %ymm3
vpshufd $78, %ymm2, %ymm2 // {10 11 8 9} ==> {8 9 10 11} 01 00 11 10
vpshufd $147, %ymm1, %ymm1 // {5 6 7 4} ==> {4 5 6 7} 00 11 10 01
vpshufd $57, %ymm3, %ymm3 // {15 12 13 14} ==> {12 13 14 15} 10 01 00 11
decq %r8
jnz .Lchacha20_128_loop
/* Feed-forward, split each ymm into its two 64-byte blocks and write back. */
vpaddd %ymm16, %ymm0, %ymm0
vpaddd %ymm17, %ymm1, %ymm1
vpaddd %ymm18, %ymm2, %ymm2
vpaddd %ymm19, %ymm3, %ymm3
vextracti32x4 $1, %ymm0, %xmm5 // ymm0 => {xmm0 xmm5}
vextracti32x4 $1, %ymm1, %xmm6 // ymm1 => {xmm1 xmm6}
vextracti32x4 $1, %ymm2, %xmm7 // ymm2 => {xmm2 xmm7}
vextracti32x4 $1, %ymm3, %xmm8 // ymm3 => {xmm3 xmm8}
WRITEBACK_64_AVX512 IN, OUT, %xmm0, %xmm1, %xmm2, %xmm3
WRITEBACK_64_AVX512 IN, OUT, %xmm5, %xmm6, %xmm7, %xmm8
add $2, %r11d
sub $128, %rcx
mov %r11d, 48(%rdi)
jz .Lchacha20_end
jmp .Lchacha20_start
/* Four-block (256-byte) kernel: state replicated into all four zmm lanes,
 * g_addOne staggers the per-lane counters 0..3. Originals in zmm16-19. */
.Lchacha20_256_start:
LOAD_1024_STATE %zmm0 %zmm1 %zmm2 %zmm3 %rdi
vpaddd g_addOne(%rip), %zmm3, %zmm3
vmovdqa64 %zmm0, %zmm16
vmovdqa64 %zmm1, %zmm17
vmovdqa64 %zmm2, %zmm18
vmovdqa64 %zmm3, %zmm19
mov $10, %r8
.Lchacha20_256_loop:
/* Column rounds, then rotate lanes into diagonal position. */
CHACHA20_ROUND %zmm0, %zmm1, %zmm2, %zmm3
vpshufd $78, %zmm2, %zmm2 // {8 9 10 11} ==> {10 11 8 9} 01 00 11 10
vpshufd $57, %zmm1, %zmm1 // {4 5 6 7} ==> {5 6 7 4} 00 11 10 01
vpshufd $147, %zmm3, %zmm3 // {12 13 14 15} ==> {15 12 13 14} 10 01 00 11
/* Diagonal rounds, then undo the lane rotation. */
CHACHA20_ROUND %zmm0, %zmm1, %zmm2, %zmm3
vpshufd $78, %zmm2, %zmm2 // {10 11 8 9} ==> {8 9 10 11} 01 00 11 10
vpshufd $147, %zmm1, %zmm1 // {5 6 7 4} ==> {4 5 6 7} 00 11 10 01
vpshufd $57, %zmm3, %zmm3 // {15 12 13 14} ==> {12 13 14 15} 10 01 00 11
decq %r8
jnz .Lchacha20_256_loop
/* Feed-forward, then split each zmm into four 128-bit block rows. */
vpaddd %zmm16, %zmm0, %zmm0
vpaddd %zmm17, %zmm1, %zmm1
vpaddd %zmm18, %zmm2, %zmm2
vpaddd %zmm19, %zmm3, %zmm3
vextracti64x2 $1, %zmm0, %xmm4
vextracti64x2 $1, %zmm1, %xmm5
vextracti64x2 $1, %zmm2, %xmm6
vextracti64x2 $1, %zmm3, %xmm7
vextracti64x2 $2, %zmm0, %xmm8
vextracti64x2 $2, %zmm1, %xmm9
vextracti64x2 $2, %zmm2, %xmm10
vextracti64x2 $2, %zmm3, %xmm11
vextracti64x2 $3, %zmm0, %xmm12
vextracti64x2 $3, %zmm1, %xmm13
vextracti64x2 $3, %zmm2, %xmm14
vextracti64x2 $3, %zmm3, %xmm15
WRITEBACK_64_AVX512 IN, OUT, %xmm0, %xmm1, %xmm2, %xmm3
WRITEBACK_64_AVX512 IN, OUT, %xmm4, %xmm5, %xmm6, %xmm7
WRITEBACK_64_AVX512 IN, OUT, %xmm8, %xmm9, %xmm10, %xmm11
WRITEBACK_64_AVX512 IN, OUT, %xmm12, %xmm13, %xmm14, %xmm15
add $4, %r11d
sub $256, %rcx
mov %r11d, 48(%rdi)
jz .Lchacha20_end
jmp .Lchacha20_start
/* Eight-block (512-byte) kernel: each state word is broadcast across one ymm
 * register (ymm0..ymm15 hold words 0..15 for 8 blocks); g_add8block staggers
 * the 8 counters. The untouched originals are preserved in ymm16-31.
 * Broadcasts and backup copies are interleaved for scheduling. */
.Lchacha20_512_start:
LOAD_512_STATE %ymm0, %ymm1, %ymm2, %ymm3, %rdi
vpshufd $0b00000000, %ymm3, %ymm12
vpshufd $0b01010101, %ymm3, %ymm13
vpaddd g_add8block(%rip), %ymm12, %ymm12 // 0, 1, 2, 3, 4, 5, 6 ,7
vmovdqa32 %ymm12, %ymm28
vpshufd $0b10101010, %ymm3, %ymm14
vmovdqa32 %ymm13, %ymm29
vpshufd $0b11111111, %ymm3, %ymm15
vmovdqa32 %ymm14, %ymm30
vpshufd $0b00000000, %ymm2, %ymm8
vmovdqa32 %ymm15, %ymm31
vpshufd $0b01010101, %ymm2, %ymm9
vmovdqa32 %ymm8, %ymm24
vpshufd $0b10101010, %ymm2, %ymm10
vmovdqa32 %ymm9, %ymm25
vpshufd $0b11111111, %ymm2, %ymm11
vmovdqa32 %ymm10, %ymm26
vpshufd $0b00000000, %ymm1, %ymm4
vmovdqa32 %ymm11, %ymm27
vpshufd $0b01010101, %ymm1, %ymm5
vmovdqa32 %ymm4, %ymm20
vpshufd $0b10101010, %ymm1, %ymm6
vmovdqa32 %ymm5, %ymm21
vpshufd $0b11111111, %ymm1, %ymm7
vmovdqa32 %ymm6, %ymm22
vpshufd $0b11111111, %ymm0, %ymm3
vmovdqa32 %ymm7, %ymm23
vpshufd $0b10101010, %ymm0, %ymm2
vmovdqa32 %ymm3, %ymm19
vpshufd $0b01010101, %ymm0, %ymm1
vmovdqa32 %ymm2, %ymm18
vpshufd $0b00000000, %ymm0, %ymm0
vmovdqa32 %ymm1, %ymm17
vmovdqa32 %ymm0, %ymm16
mov $10, %r8
.Lchacha20_512_loop:
CHACHA20_LOOP_AVX512 %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, \
%ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15
decq %r8
jnz .Lchacha20_512_loop
/* ymm16~31: original matrix */
vpaddd %ymm16, %ymm0, %ymm0
vpaddd %ymm17, %ymm1, %ymm1
vpaddd %ymm18, %ymm2, %ymm2
vpaddd %ymm19, %ymm3, %ymm3
vpaddd %ymm20, %ymm4, %ymm4
vpaddd %ymm21, %ymm5, %ymm5
vpaddd %ymm22, %ymm6, %ymm6
vpaddd %ymm23, %ymm7, %ymm7
vpaddd %ymm24, %ymm8, %ymm8
vpaddd %ymm25, %ymm9, %ymm9
vpaddd %ymm26, %ymm10, %ymm10
vpaddd %ymm27, %ymm11, %ymm11
vpaddd %ymm28, %ymm12, %ymm12
vpaddd %ymm29, %ymm13, %ymm13
vpaddd %ymm30, %ymm14, %ymm14
vpaddd %ymm31, %ymm15, %ymm15
/* Transpose word-sliced rows back into per-block state order. */
MATRIX_TO_STATE %ymm0, %ymm1, %ymm2, %ymm3, %ymm20, %ymm21 // set state 0, 3, 9, 10
MATRIX_TO_STATE %ymm4, %ymm5, %ymm6, %ymm7, %ymm22, %ymm23 // set state 4, 7, 13, 14
MATRIX_TO_STATE %ymm8, %ymm9, %ymm10, %ymm11, %ymm1, %ymm2 // set state 8, 11, 1, 2
MATRIX_TO_STATE %ymm12, %ymm13, %ymm14, %ymm15, %ymm5, %ymm6 // set state 12, 15, 5, 6
/*
 * {A0 A1 A2 A3 E0 E1 E2 E3}
 * {B0 B1 B2 B3 F0 F1 F2 F3}
 * {C0 C1 C2 C3 G0 G1 G2 G3}
 * {D0 D1 D2 D3 H0 H1 H2 H3}
 * ...
 * =>
 * {A0 A1 A2 A3 B0 B1 B2 B3}
 * {C0 C1 C2 C3 D0 D1 D2 D3}
 * ....
 */
CONVERT_Y %ymm0, %ymm4, $0 IN OUT
CONVERT_Y %ymm8, %ymm12, $0 IN OUT
CONVERT_Y %ymm3, %ymm7, $0 IN OUT
CONVERT_Y %ymm11, %ymm15, $0 IN OUT
CONVERT_Y %ymm20, %ymm22, $0 IN OUT
CONVERT_Y %ymm1, %ymm5, $0 IN OUT
CONVERT_Y %ymm21, %ymm23, $0 IN OUT
CONVERT_Y %ymm2, %ymm6, $0 IN OUT
CONVERT_Y %ymm0, %ymm4, $1 IN OUT
CONVERT_Y %ymm8, %ymm12, $1 IN OUT
CONVERT_Y %ymm3, %ymm7, $1 IN OUT
CONVERT_Y %ymm11, %ymm15, $1 IN OUT
CONVERT_Y %ymm20, %ymm22, $1 IN OUT
CONVERT_Y %ymm1, %ymm5, $1 IN OUT
CONVERT_Y %ymm21, %ymm23, $1 IN OUT
CONVERT_Y %ymm2, %ymm6, $1 IN OUT
add $8, %r11d
sub $512, %rcx
mov %r11d, 48(%rdi)
jz .Lchacha20_end
jmp .Lchacha20_start
/* Sixteen-block (1024-byte) kernel: each state word broadcast across one zmm
 * register; g_add16block staggers the 16 counters. Originals in zmm16-31. */
.Lchacha20_1024_start:
LOAD_1024_STATE %zmm0 %zmm1 %zmm2 %zmm3 %rdi
STATE_TO_MATRIX_Z_AVX512 %zmm0, %zmm16, %zmm17, %zmm18, %zmm19
STATE_TO_MATRIX_Z_AVX512 %zmm1, %zmm20, %zmm21, %zmm22, %zmm23
STATE_TO_MATRIX_Z_AVX512 %zmm2, %zmm24, %zmm25, %zmm26, %zmm27
STATE_TO_MATRIX_Z_AVX512 %zmm3, %zmm28, %zmm29, %zmm30, %zmm31
vpaddd g_add16block(%rip), %zmm28, %zmm28
vmovdqa64 %zmm16, %zmm0
vmovdqa64 %zmm17, %zmm1
vmovdqa64 %zmm18, %zmm2
vmovdqa64 %zmm19, %zmm3
vmovdqa64 %zmm20, %zmm4
vmovdqa64 %zmm21, %zmm5
vmovdqa64 %zmm22, %zmm6
vmovdqa64 %zmm23, %zmm7
vmovdqa64 %zmm24, %zmm8
vmovdqa64 %zmm25, %zmm9
vmovdqa64 %zmm26, %zmm10
vmovdqa64 %zmm27, %zmm11
vmovdqa64 %zmm28, %zmm12
vmovdqa64 %zmm29, %zmm13
vmovdqa64 %zmm30, %zmm14
vmovdqa64 %zmm31, %zmm15
mov $10, %r8
jmp .Lchacha20_1024_loop
/* Continuation entry: reuse the saved originals (zmm16-31) and only step
 * the counter row, avoiding a full reload of the context. */
.Lchacha20_1024_start_cont:
vmovdqa32 %zmm16, %zmm0
vmovdqa32 %zmm17, %zmm1
vmovdqa32 %zmm18, %zmm2
vmovdqa32 %zmm19, %zmm3
vmovdqa32 %zmm20, %zmm4
vmovdqa32 %zmm21, %zmm5
vmovdqa32 %zmm22, %zmm6
vmovdqa32 %zmm23, %zmm7
vmovdqa32 %zmm24, %zmm8
vmovdqa32 %zmm25, %zmm9
vmovdqa32 %zmm26, %zmm10
vmovdqa32 %zmm27, %zmm11
vmovdqa32 %zmm28, %zmm12
vmovdqa32 %zmm29, %zmm13
vpaddd g_addsecond16block(%rip), %zmm12, %zmm12 // advance all 16 lane counters by 16 blocks
vmovdqa32 %zmm30, %zmm14
vmovdqa32 %zmm31, %zmm15
vmovdqa32 %zmm12, %zmm28
mov $10, %r8
.Lchacha20_1024_loop:
CHACHA20_LOOP_AVX512 %zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, \
%zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15
decq %r8
jnz .Lchacha20_1024_loop
/* Feed-forward with the saved originals. */
vpaddd %zmm16, %zmm0, %zmm0
vpaddd %zmm17, %zmm1, %zmm1
vpaddd %zmm18, %zmm2, %zmm2
vpaddd %zmm19, %zmm3, %zmm3
vpaddd %zmm20, %zmm4, %zmm4
vpaddd %zmm21, %zmm5, %zmm5
vpaddd %zmm22, %zmm6, %zmm6
vpaddd %zmm23, %zmm7, %zmm7
vpaddd %zmm24, %zmm8, %zmm8
vpaddd %zmm25, %zmm9, %zmm9
vpaddd %zmm26, %zmm10, %zmm10
vpaddd %zmm27, %zmm11, %zmm11
vpaddd %zmm28, %zmm12, %zmm12
vpaddd %zmm29, %zmm13, %zmm13
vpaddd %zmm30, %zmm14, %zmm14
vpaddd %zmm31, %zmm15, %zmm15
/* store matrix 16, 17, 18, 19 in stack */
vmovdqa64 %zmm16, (%rsp)
vmovdqa64 %zmm17, 64(%rsp)
vmovdqa64 %zmm18, 128(%rsp)
vmovdqa64 %zmm19, 192(%rsp)
/* store matrix 9, 10, 13, 14 in zmm16, 17, 18, 19 */
vmovdqa64 %zmm9, %zmm16 // zmm16: encrypt matrix zmm9
vmovdqa64 %zmm10, %zmm17 // zmm17: encrypt matrix zmm10
vmovdqa64 %zmm13, %zmm18 // zmm18: encrypt matrix zmm13
vmovdqa64 %zmm14, %zmm19 // zmm19: encrypt matrix zmm14
/* zmm0~15: encrypt matrix 0 ~ 15*/
MATRIX_TO_STATE %zmm0, %zmm1, %zmm2, %zmm3, %zmm9, %zmm10 // set state 0, 3, 9, 10
MATRIX_TO_STATE %zmm4, %zmm5, %zmm6, %zmm7, %zmm13, %zmm14 // set state 4, 7, 13, 14
MATRIX_TO_STATE %zmm8, %zmm16, %zmm17, %zmm11, %zmm1, %zmm2 // set state 8, 11, 1, 2
MATRIX_TO_STATE %zmm12, %zmm18, %zmm19, %zmm15, %zmm5, %zmm6 // set state 12, 15, 5, 6
/*
 * {A0 A1 A2 A3 E0 E1 E2 E3 I0 I1 I2 I3 M0 M1 M2 M3}
 * {B0 B1 B2 B3 F0 F1 F2 F3 J0 J1 J2 J3 N0 N1 N2 N3}
 * {C0 C1 C2 C3 G0 G1 G2 G3 K0 K1 K2 K3 O0 O1 O2 O3}
 * {D0 D1 D2 D3 H0 H1 H2 H3 L0 L1 L2 L3 P0 P1 P2 P3}
 * ...
 * =>
 * {A0 A1 A2 A3 B0 B1 B2 B3 C0 C1 C2 C3 D0 D1 D2 D3}
 * {E0 E1 E2 E3 F0 F1 F2 F3 G0 G1 G2 G3 H0 H1 H2 H3}
 * {I0 I1 I2 I3 J0 J1 J2 J3 K0 K1 K2 K3 L0 L1 L2 L3}
 * ....
 */
CONVERT_Z %zmm0, %zmm4, %zmm8, %zmm12, $0 IN OUT
CONVERT_Z %zmm3, %zmm7, %zmm11, %zmm15, $0 IN OUT
CONVERT_Z %zmm9, %zmm13, %zmm1, %zmm5, $0 IN OUT
CONVERT_Z %zmm10, %zmm14, %zmm2, %zmm6, $0 IN OUT
CONVERT_Z %zmm0, %zmm4, %zmm8, %zmm12, $1 IN OUT
CONVERT_Z %zmm3, %zmm7, %zmm11, %zmm15, $1 IN OUT
CONVERT_Z %zmm9, %zmm13, %zmm1, %zmm5, $1 IN OUT
CONVERT_Z %zmm10, %zmm14, %zmm2, %zmm6, $1 IN OUT
CONVERT_Z %zmm0, %zmm4, %zmm8, %zmm12, $2 IN OUT
CONVERT_Z %zmm3, %zmm7, %zmm11, %zmm15, $2 IN OUT
CONVERT_Z %zmm9, %zmm13, %zmm1, %zmm5, $2 IN OUT
CONVERT_Z %zmm10, %zmm14, %zmm2, %zmm6, $2 IN OUT
CONVERT_Z %zmm0, %zmm4, %zmm8, %zmm12, $3 IN OUT
CONVERT_Z %zmm3, %zmm7, %zmm11, %zmm15, $3 IN OUT
CONVERT_Z %zmm9, %zmm13, %zmm1, %zmm5, $3 IN OUT
CONVERT_Z %zmm10, %zmm14, %zmm2, %zmm6, $3 IN OUT
/* restore original matrix rows 16~19 from the stack */
vmovdqa64 (%rsp), %zmm16
vmovdqa64 64(%rsp), %zmm17
vmovdqa64 128(%rsp), %zmm18
vmovdqa64 192(%rsp), %zmm19
add $16, %r11d
sub $1024, %rcx
mov %r11d, 48(%rdi)
jz .Lchacha20_clear
cmp $1024, %rcx
jae .Lchacha20_1024_start_cont
jmp .Lchacha20_start
.Lchacha20_clear:
/* clear sensitive info in stack (only the 1024 path spilled state there) */
vpxord %zmm0, %zmm0, %zmm0
vmovdqa64 %zmm0, (%rsp)
vmovdqa64 %zmm0, 64(%rsp)
vmovdqa64 %zmm0, 128(%rsp)
vmovdqa64 %zmm0, 192(%rsp)
.Lchacha20_end:
/* scrub the counter copy and restore the caller's stack pointer */
xor %r11d, %r11d
mov %r9, %rsp
.cfi_endproc
ret
.size CHACHA20_Update,.-CHACHA20_Update
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chacha20block_x8664_avx512.S | Unix Assembly | unknown | 21,249 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
.text
/* Constant {1,0,0,0}: adds 1 to the block-counter word of a state row. */
.LAndBlock:
.long 1, 0, 0, 0
/* pshufb masks rotating every 32-bit word left by 16 / 8 bits. */
.LRor16:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.LRor8:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
/* Input/output byte pointers (copied from rsi/rdx at function entry). */
.set IN, %r9
.set OUT, %r10
/* Original State */
.set O00, %xmm12
.set O01, %xmm13
.set O02, %xmm14
.set O03, %xmm15
/* State 0 */
.set S00, %xmm0 // LINE 0 STATE 0
.set S01, %xmm1 // LINE 1 STATE 0
.set S02, %xmm2 // LINE 2 STATE 0
.set S03, %xmm3 // LINE 3 STATE 0
/* State 1 */
.set S10, %xmm5 // LINE 0 STATE 1
.set S11, %xmm6 // LINE 1 STATE 1
.set S12, %xmm7 // LINE 2 STATE 1
.set S13, %xmm8 // LINE 3 STATE 1
/* One double quarter-round (RFC 7539, 2.1) on four state rows. SSE has no
 * rotate, so <<<16 and <<<8 use pshufb masks and <<<12 / <<<7 use a
 * shift/shift/or sequence through scratch register \CUR. */
.macro CHACHA20_ROUND S0 S1 S2 S3 CUR
paddd \S1, \S0
pxor \S0, \S3
pshufb .LRor16(%rip), \S3
paddd \S3, \S2
pxor \S2, \S1
movdqa \S1, \CUR
psrld $20, \S1
pslld $12, \CUR
por \CUR, \S1
paddd \S1, \S0
pxor \S0, \S3
pshufb .LRor8(%rip), \S3
paddd \S3, \S2
pxor \S2, \S1
movdqa \S1, \CUR
psrld $25, \S1
pslld $7, \CUR
por \CUR, \S1
.endm
/* QUARTERROUND for two states: same double quarter-round as CHACHA20_ROUND,
 * applied back-to-back to state 0 (\S0..\S3, scratch \CUR) and state 1
 * (\S4..\S7, scratch \CUR1) so two blocks progress per macro expansion. */
.macro CHACHA20_2_ROUND S0 S1 S2 S3 CUR S4 S5 S6 S7 CUR1
paddd \S1, \S0
pxor \S0, \S3
pshufb .LRor16(%rip), \S3
paddd \S3, \S2
pxor \S2, \S1
movdqa \S1, \CUR
psrld $20, \S1
pslld $12, \CUR
por \CUR, \S1
paddd \S1, \S0
pxor \S0, \S3
pshufb .LRor8(%rip), \S3
paddd \S3, \S2
pxor \S2, \S1
movdqa \S1, \CUR
psrld $25, \S1
pslld $7, \CUR
por \CUR, \S1
paddd \S5, \S4
pxor \S4, \S7
pshufb .LRor16(%rip), \S7
paddd \S7, \S6
pxor \S6, \S5
movdqa \S5, \CUR1
psrld $20, \S5
pslld $12, \CUR1
por \CUR1, \S5
paddd \S5, \S4
pxor \S4, \S7
pshufb .LRor8(%rip), \S7
paddd \S7, \S6
pxor \S6, \S5
movdqa \S5, \CUR1
psrld $25, \S5
pslld $7, \CUR1
por \CUR1, \S5
.endm
/* final add & xor for 64 bytes: feed-forward (state += original), XOR with
 * 64 input bytes and store the keystream-masked result (clobbers xmm4/9/10/11). */
.macro WRITE_BACK_64 IN_POS OUT_POS
paddd O00, S00
paddd O01, S01
paddd O02, S02
paddd O03, S03
movdqu (\IN_POS), %xmm4 // get input
movdqu 16(\IN_POS), %xmm9
movdqu 32(\IN_POS), %xmm10
movdqu 48(\IN_POS), %xmm11
pxor %xmm4, S00
pxor %xmm9, S01
pxor %xmm10, S02
pxor %xmm11, S03
movdqu S00, (\OUT_POS) // write back output
movdqu S01, 16(\OUT_POS)
movdqu S02, 32(\OUT_POS)
movdqu S03, 48(\OUT_POS)
.endm
/* final add & xor for 128 bytes: feed-forward and write both blocks.
 * Between the two additions the counter word of O03 is bumped to r11d so
 * the same original rows serve state 1 (O03 keeps state 1's counter after). */
.macro WRITE_BACK_128 IN_POS OUT_POS
paddd O00, S00 // state 0 + origin state 0
paddd O01, S01
paddd O02, S02
paddd O03, S03
pinsrd $0, %r11d, O03 // change Original state 0 to Original state 1
paddd O00, S10 // state 1 + origin state 1
paddd O01, S11
paddd O02, S12
paddd O03, S13
movdqu (\IN_POS), %xmm4 // get input 0
movdqu 16(\IN_POS), %xmm9
movdqu 32(\IN_POS), %xmm10
movdqu 48(\IN_POS), %xmm11
pxor %xmm4, S00 // input 0 ^ state 0
pxor %xmm9, S01
pxor %xmm10, S02
pxor %xmm11, S03
movdqu S00, (\OUT_POS) // write back to output 0
movdqu S01, 16(\OUT_POS)
movdqu S02, 32(\OUT_POS)
movdqu S03, 48(\OUT_POS)
movdqu 64(\IN_POS), %xmm4 // get input 1
movdqu 80(\IN_POS), %xmm9
movdqu 96(\IN_POS), %xmm10
movdqu 112(\IN_POS), %xmm11
pxor %xmm4, S10 // input 1 ^ state 1
pxor %xmm9, S11
pxor %xmm10, S12
pxor %xmm11, S13
movdqu S10, 64(\OUT_POS) // write back to output 1
movdqu S11, 80(\OUT_POS)
movdqu S12, 96(\OUT_POS)
movdqu S13, 112(\OUT_POS)
.endm
/* Advance the counter in r11d by one, patch it into the original counter
 * row O03, and copy the original rows into working state 0. */
.macro GENERATE_1_STATE
add $1, %r11d
pinsrd $0, %r11d, O03
movdqu O00, S00 // set state 0
movdqu O01, S01
movdqu O02, S02
movdqu O03, S03
.endm
/* Prepare two working states: state 0 gets counter r11d+1, state 1 gets
 * counter r11d+2; r11d ends holding state 1's counter. */
.macro GENERATE_2_STATE
add $1, %r11d
pinsrd $0, %r11d, O03
movdqu O00, S00 // set state 0
movdqu O01, S01
movdqu O02, S02
movdqu O03, S03
movdqu O00, S10 // set state 1
movdqu O01, S11
movdqu O02, S12
movdqu O03, S13
add $1, %r11d
pinsrd $0, %r11d, S13
.endm
/*
* Processing 64 bytes: 4 xmm registers
* xmm0 ~ xmm3:
* xmm0 {0, 1, 2, 3}
* xmm1 {4, 5, 6, 7}
* xmm2 {8, 9, 10, 11}
* xmm3 {12, 13, 14, 15}
*
* Processing 128 bytes: 8 xmm registers
* xmm0 ~ xmm8:
* xmm0 {0, 1, 2, 3} xmm5 {0, 1, 2, 3}
* xmm1 {4, 5, 6, 7} xmm6 {4, 5, 6, 7}
* xmm2 {8, 9, 10, 11} xmm7 {8, 9, 10, 11}
* xmm3 {12, 13, 14, 15} xmm8 {12, 13, 14, 15}
*
* Processing 256 bytes: 16 xmm registers
* xmm0 ~ xmm15:
* xmm0 {0, 0, 0, 0}
* xmm1 {1, 2, 2, 2}
* xmm2 {3, 3, 3, 3}
* xmm3 {4, 4, 4, 4}
* ...
* xmm15 {15, 15, 15, 15}
*
* Processing 512 bytes: 16 xmm registers
* ymm0 ~ ymm15:
* ymm0 {0, 0, 0, 0}
* ymm1 {1, 2, 2, 2}
* ymm2 {3, 3, 3, 3}
* ymm3 {4, 4, 4, 4}
* ...
* ymm15 {15, 15, 15, 15}
*
*/
/**
* @Interconnection with the C interface:void CHACHA20_Update(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *in, uint8_t *out, uint32_t len);
* @brief chacha20 algorithm
* @param ctx [IN] Algorithm context, which is set by the C interface and transferred.
* @param in [IN] Data to be encrypted
* @param out [OUT] Data after encryption
* @param len [IN] Encrypted length
* esp cannot use 15 available ctx in out len
* 16 registers are needed in one cycle, then
* {0, 1, 4, 5, 8, 9, 12, 13}
* {2, 3, 6, 7, 10, 11, 14, 15}
**/
.globl CHACHA20_Update
.type CHACHA20_Update,%function
.align 64
CHACHA20_Update:
.cfi_startproc
push %r12
mov %rcx, %r12
/* r11d <- block counter (ctx->state[12] at byte offset 48); pre-decremented
 * because GENERATE_*_STATE increments before use. r12 tracks remaining length. */
mov 48(%rdi), %r11d
mov %rsi, IN
mov %rdx, OUT
movdqu (%rdi), O00 // state[0-3]
movdqu 16(%rdi), O01 // state[4-7]
movdqu 32(%rdi), O02 // state[8-11]
movdqu 48(%rdi), O03 // state[12-15]
sub $1, %r11d
/* Dispatch: two blocks at a time while >=128 bytes remain, then one block. */
.LChaCha20_start:
cmp $128, %r12
jae .LChaCha20_128_start
cmp $64, %r12
jae .LChaCha20_64_start
jmp .LChaCha20_end
.LChaCha20_64_start:
GENERATE_1_STATE
mov $10, %r8
.LChaCha20_64_loop:
sub $1, %r8
/* 0 = 0 + 4, 12 = (12 ^ 0) >>> 16 | 8 = 8 + 12, 4 = (4 ^ 8) >>> 12 | 0 = 0 + 4, 12 = (12 ^ 0) >>> 8 | 8 = 8 + 12, 4 = (4 ^ 8) >>> 7 */
/* 1 = 1 + 5, 13 = (13 ^ 1) >>> 16 | 9 = 9 + 13, 5 = (5 ^ 9) >>> 12 | 1 = 1 + 5, 13 = (13 ^ 1) >>> 8 | 9 = 9 + 13, 5 = (5 ^ 9) >>> 7 */
/* 2 = 2 + 6, 14 = (14 ^ 2) >>> 16 | 10 = 10 + 14, 6 = (6 ^ 10)>>> 12 | 2 = 2 + 6, 14 = (14 ^ 2) >>> 8 | 10 =10+ 14, 6 = (6 ^ 10)>>> 7 */
/* 3 = 3 + 7, 15 = (15 ^ 3) >>> 16 | 11 = 11 + 15, 7 = (7 ^ 11)>>> 12 | 3 = 3 + 7 ,15 = (15 ^ 3) >>> 8 | 11 =11+ 15, 7 = (7 ^ 11)>>> 7 */
CHACHA20_ROUND S00 S01 S02 S03 %xmm4
pshufd $78, S02, S02 // {8 9 10 11} ==> {10 11 8 9} 01 00 11 10
pshufd $57, S01, S01 // {4 5 6 7} ==> {5 6 7 4} 00 11 10 01
pshufd $147, S03, S03 // {12 13 14 15} ==> {15 12 13 14} 10 01 00 11
/* 0 = 0 + 5 , 15 = (15 ^ 0) >>> 16 | 10 = 10 + 15, 5 = (5 ^ 10) >>> 12 | 0 = 0 + 5, 15 = (15 ^ 0) >>> 8 | 10 = 10 + 15, 5 = (5 ^ 10) >>> 7 */
/* 1 = 1 + 6 , 12 = (12 ^ 1) >>> 16 | 11 = 11 + 12, 6 = (6 ^ 11) >>> 12 | 1 = 1 + 6, 12 = (12 ^ 1) >>> 8 | 11 = 11 + 12, 6 = (6 ^ 11) >>> 7 */
/* 2 = 2 + 7 , 13 = (13 ^ 2) >>> 16 | 8 = 8 + 13, 7 = (7 ^ 8)>>> 12 | 2 = 2 + 7, 13 = (13 ^ 2) >>> 8 | 8 = 8 + 13, 7 = (7 ^ 8)>>> 7 */
/* 3 = 3 + 4 , 14 = (14 ^ 3) >>> 16 | 9 = 9 + 14, 4 = (4 ^ 9)>>> 12 | 3 = 3 + 4, 14 = (14 ^ 3) >>> 8 | 9 = 9 + 14, 4 = (4 ^ 9)>>> 7 */
CHACHA20_ROUND S00 S01 S02 S03 %xmm4
pshufd $78, S02, S02 // {10 11 8 9} ==> {8 9 10 11} 01 00 11 10
pshufd $147, S01, S01 // {5 6 7 4} ==> {4 5 6 7} 00 11 10 01
pshufd $57, S03, S03 // {15 12 13 14} ==> {12 13 14 15} 10 01 00 11
jnz .LChaCha20_64_loop
WRITE_BACK_64 IN OUT
add $64, IN
add $64, OUT
sub $64, %r12
jmp .LChaCha20_start
.LChaCha20_128_start:
GENERATE_2_STATE
mov $10, %r8
.LChaCha20_128_loop:
/* Column rounds on both states, rotate rows into diagonal position. */
CHACHA20_2_ROUND S00 S01 S02 S03 %xmm4 S10 S11 S12 S13 %xmm9
pshufd $78, S02, S02 // {8 9 10 11} ==> {10 11 8 9} 01 00 11 10
pshufd $57, S01, S01 // {4 5 6 7} ==> {5 6 7 4} 00 11 10 01
pshufd $147, S03, S03 // {12 13 14 15} ==> {15 12 13 14} 10 01 00 11
pshufd $78, S12, S12 // {8 9 10 11} ==> {10 11 8 9} 01 00 11 10
pshufd $57, S11, S11 // {4 5 6 7} ==> {5 6 7 4} 00 11 10 01
pshufd $147, S13, S13 // {12 13 14 15} ==> {15 12 13 14} 10 01 00 11
/* Diagonal rounds on both states, undo the rotation. */
CHACHA20_2_ROUND S00 S01 S02 S03 %xmm4 S10 S11 S12 S13 %xmm9
pshufd $78, S02, S02 // {10 11 8 9} ==> {8 9 10 11} 01 00 11 10
pshufd $147, S01, S01 // {5 6 7 4} ==> {4 5 6 7} 00 11 10 01
pshufd $57, S03, S03 // {15 12 13 14} ==> {12 13 14 15} 10 01 00 11
pshufd $78, S12, S12 // {10 11 8 9} ==> {8 9 10 11} 01 00 11 10
pshufd $147, S11, S11 // {5 6 7 4} ==> {4 5 6 7} 00 11 10 01
pshufd $57, S13, S13 // {15 12 13 14} ==> {12 13 14 15} 10 01 00 11
sub $1, %r8
jnz .LChaCha20_128_loop
WRITE_BACK_128 IN OUT
add $128, IN
add $128, OUT
sub $128, %r12
jmp .LChaCha20_start
.LChaCha20_end:
/* Undo the entry pre-decrement and persist the next block counter. */
add $1, %r11d
mov %r11d, 48(%rdi)
pop %r12
ret
.cfi_endproc
.size CHACHA20_Update,.-CHACHA20_Update
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/asm/chachablock_x86_AVX2.S | Unix Assembly | unknown | 10,613 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
#include "securec.h"
#include "bsl_err_internal.h"
#include "crypt_utils.h"
#include "crypt_errno.h"
#include "crypt_chacha20.h"
#include "chacha20_local.h"
/* ctx->set bit flags: a key and a nonce must both be configured before Update. */
#define KEYSET 0x01
#define NONCESET 0x02
// RFC7539-2.1
/* ChaCha20 quarter round: 32-bit add / xor / left-rotate by 16, 12, 8, 7. */
#define QUARTER(a, b, c, d) \
do { \
(a) += (b); (d) ^= (a); (d) = ROTL32((d), 16); \
(c) += (d); (b) ^= (c); (b) = ROTL32((b), 12); \
(a) += (b); (d) ^= (a); (d) = ROTL32((d), 8); \
(c) += (d); (b) ^= (c); (b) = ROTL32((b), 7); \
} while (0)
#define QUARTERROUND(state, a, b, c, d) QUARTER((state)[(a)], (state)[(b)], (state)[(c)], (state)[(d)])
/**
 * Initialize the ChaCha20 state matrix from a 256-bit key.
 *
 * RFC7539-2.3 state layout (4x4 little-endian words):
 *   cccccccc cccccccc cccccccc cccccccc   constants (words 0-3)
 *   kkkkkkkk kkkkkkkk kkkkkkkk kkkkkkkk   key       (words 4-11)
 *   kkkkkkkk kkkkkkkk kkkkkkkk kkkkkkkk
 *   bbbbbbbb nnnnnnnn nnnnnnnn nnnnnnnn   counter / nonce (words 12-15)
 *
 * @param ctx    [IN/OUT] ChaCha20 context
 * @param key    [IN] 256-bit key, consumed as eight little-endian 32-bit words
 * @param keyLen [IN] must equal CHACHA20_KEYLEN
 * @return CRYPT_SUCCESS, or CRYPT_NULL_INPUT / CRYPT_CHACHA20_KEYLEN_ERROR
 */
int32_t CRYPT_CHACHA20_SetKey(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *key, uint32_t keyLen)
{
    if (ctx == NULL || key == NULL || keyLen == 0) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (keyLen != CHACHA20_KEYLEN) {
        BSL_ERR_PUSH_ERROR(CRYPT_CHACHA20_KEYLEN_ERROR);
        return CRYPT_CHACHA20_KEYLEN_ERROR;
    }
    // Words 0-3 are the fixed constants: 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574.
    ctx->state[0] = 0x61707865;
    ctx->state[1] = 0x3320646e;
    ctx->state[2] = 0x79622d32;
    ctx->state[3] = 0x6b206574;
    // Words 4-11: the key, read little-endian in 4-byte chunks.
    for (uint32_t word = 0; word < 8; word++) {
        ctx->state[4 + word] = GET_UINT32_LE(key, word * 4);
    }
    // Word 12 is the block counter.
    // RFC7539-2.4: it makes sense to use one if we use the zero block.
    ctx->state[12] = 1;
    ctx->set |= KEYSET;
    ctx->lastLen = 0;
    return CRYPT_SUCCESS;
}
/**
 * Load the 96-bit nonce into state words 13-15 (RFC7539-2.3).
 * Each 4-byte chunk of the nonce is read as a little-endian integer.
 *
 * @param ctx      [IN/OUT] ChaCha20 context
 * @param nonce    [IN] 96-bit nonce; must not repeat under the same key
 * @param nonceLen [IN] must equal CHACHA20_NONCELEN
 * @return CRYPT_SUCCESS, or CRYPT_NULL_INPUT / CRYPT_CHACHA20_NONCELEN_ERROR
 */
static int32_t CRYPT_CHACHA20_SetNonce(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *nonce, uint32_t nonceLen)
{
    if (ctx == NULL || nonce == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (nonceLen != CHACHA20_NONCELEN) {
        BSL_ERR_PUSH_ERROR(CRYPT_CHACHA20_NONCELEN_ERROR);
        return CRYPT_CHACHA20_NONCELEN_ERROR;
    }
    // Words 13-15: the nonce, read little-endian in 4-byte chunks.
    for (uint32_t word = 0; word < 3; word++) {
        ctx->state[13 + word] = GET_UINT32_LE(nonce, word * 4);
    }
    ctx->set |= NONCESET;
    ctx->lastLen = 0;
    return CRYPT_SUCCESS;
}
// Little-endian data input
/**
 * Set the block counter (state word 12) from little-endian input bytes.
 *
 * RFC7539-2.4: the counter can be any value but is usually zero or one;
 * one is used when the zero block is reserved (e.g. to derive the Poly1305
 * one-time key in the AEAD construction).
 *
 * @param ctx    [IN/OUT] ChaCha20 context
 * @param cnt    [IN] 4 little-endian counter bytes
 * @param cntLen [IN] must equal sizeof(uint32_t)
 * @return CRYPT_SUCCESS, or CRYPT_NULL_INPUT / CRYPT_CHACHA20_COUNTLEN_ERROR
 */
static int32_t CRYPT_CHACHA20_SetCount(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *cnt, uint32_t cntLen)
{
    if (ctx == NULL || cnt == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (cntLen != sizeof(uint32_t)) {
        BSL_ERR_PUSH_ERROR(CRYPT_CHACHA20_COUNTLEN_ERROR);
        return CRYPT_CHACHA20_COUNTLEN_ERROR;
    }
    // Fix: pass the byte pointer directly, consistent with every other
    // GET_UINT32_LE call site; the previous (uintptr_t) cast was spurious.
    ctx->state[12] = GET_UINT32_LE(cnt, 0);
    ctx->lastLen = 0;
    return CRYPT_SUCCESS;
}
/**
 * Generate one 64-byte keystream block into ctx->last and advance the
 * block counter (state word 12). Implements the RFC7539-2.3 block function:
 * 10 iterations of column + diagonal quarter rounds, then a feed-forward
 * addition of the original state, serialized little-endian.
 */
void CHACHA20_Block(CRYPT_CHACHA20_Ctx *ctx)
{
    // ctx->last.c has the same length as ctx->state, so this cannot fail.
    (void)memcpy_s(ctx->last.c, CHACHA20_STATEBYTES, ctx->state, sizeof(ctx->state));
    /* 20 rounds total: 10 iterations of a double round (RFC7539-2.3). */
    for (uint32_t round = 0; round < 10; round++) {
        /* column round */
        QUARTERROUND(ctx->last.c, 0, 4, 8, 12);
        QUARTERROUND(ctx->last.c, 1, 5, 9, 13);
        QUARTERROUND(ctx->last.c, 2, 6, 10, 14);
        QUARTERROUND(ctx->last.c, 3, 7, 11, 15);
        /* diagonal round */
        QUARTERROUND(ctx->last.c, 0, 5, 10, 15);
        QUARTERROUND(ctx->last.c, 1, 6, 11, 12);
        QUARTERROUND(ctx->last.c, 2, 7, 8, 13);
        QUARTERROUND(ctx->last.c, 3, 4, 9, 14);
    }
    /* Feed-forward: add the original input words, then store each word
     * little-endian as required by the serialization in RFC 7539. */
    for (uint32_t idx = 0; idx < CHACHA20_STATESIZE; idx++) {
        uint32_t word = ctx->last.c[idx] + ctx->state[idx];
        ctx->last.c[idx] = CRYPT_HTOLE32(word);
    }
    ctx->state[12]++;
}
/**
 * Encrypt/decrypt len bytes by XOR with the ChaCha20 keystream.
 * Consumes leftover keystream from the previous call first, bulk-processes
 * whole 64-byte blocks via the CHACHA20_Update backend, then buffers any
 * partial tail block in ctx->last for the next call.
 *
 * @param ctx [IN/OUT] context; key and nonce must already be set
 * @param in  [IN] input bytes
 * @param out [OUT] output bytes (may equal in for in-place operation — TODO confirm with callers)
 * @param len [IN] number of bytes, must be nonzero
 * @return CRYPT_SUCCESS, or CRYPT_NULL_INPUT / CRYPT_CHACHA20_NO_KEYINFO /
 *         CRYPT_CHACHA20_NO_NONCEINFO
 */
int32_t CRYPT_CHACHA20_Update(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *in, uint8_t *out, uint32_t len)
{
    if (ctx == NULL || out == NULL || in == NULL || len == 0) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if ((ctx->set & KEYSET) == 0) {
        BSL_ERR_PUSH_ERROR(CRYPT_CHACHA20_NO_KEYINFO);
        return CRYPT_CHACHA20_NO_KEYINFO;
    }
    if ((ctx->set & NONCESET) == 0) {
        BSL_ERR_PUSH_ERROR(CRYPT_CHACHA20_NO_NONCEINFO);
        return CRYPT_CHACHA20_NO_NONCEINFO;
    }
    uint32_t i;
    const uint8_t *offIn = in;
    uint8_t *offOut = out;
    uint32_t tLen = len;
    if (ctx->lastLen != 0) { // has remaining data during the last processing
        uint32_t num = (tLen < ctx->lastLen) ? tLen : ctx->lastLen;
        // Unused keystream sits at the END of ctx->last.u; skip the consumed part.
        uint8_t *tLast = ctx->last.u + CHACHA20_STATEBYTES - ctx->lastLen; // offset
        for (i = 0; i < num; i++) {
            offOut[i] = tLast[i] ^ offIn[i];
        }
        offIn += num;
        offOut += num;
        tLen -= num;
        ctx->lastLen -= num;
    }
    if (tLen >= CHACHA20_STATEBYTES) { // which is greater than or equal to an integer multiple of 64 bytes
        // Backend (C or platform assembly) consumes only the whole-block prefix.
        CHACHA20_Update(ctx, offIn, offOut, tLen); // processes data that is an integer multiple of 64 bytes
        uint32_t vLen = tLen - (tLen & 0x3f); // 0x3f = %CHACHA20_STATEBYTES
        offIn += vLen;
        offOut += vLen;
        tLen -= vLen;
    }
    // Process the remaining data
    if (tLen > 0) {
        // Generate one fresh keystream block; the unused suffix is kept for next call.
        CHACHA20_Block(ctx);
        uint32_t t = tLen & 0xf8; // processing length is a multiple of 8
        if (t != 0) {
            DATA64_XOR(ctx->last.u, offIn, offOut, t);
        }
        for (i = t; i < tLen; i++) {
            offOut[i] = ctx->last.u[i] ^ offIn[i];
        }
        ctx->lastLen = CHACHA20_STATEBYTES - tLen;
    }
    return CRYPT_SUCCESS;
}
/**
 * @brief Control entry for ChaCha20: set the nonce (IV) or the block counter.
 *
 * RFC 7539-2.8.1: in chacha20_poly1305 mode the configured IV is the nonce
 * of chacha20 — chacha20_aead_encrypt(aad, key, iv, constant, plaintext),
 * nonce = constant | iv.
 *
 * @param ctx [IN/OUT] ChaCha20 context
 * @param opt [IN] CRYPT_CTRL_SET_IV or CRYPT_CTRL_SET_COUNT
 * @param val [IN] Value buffer for the selected option
 * @param len [IN] Length of val
 * @return CRYPT_SUCCESS or an error code from the delegated setter;
 *         CRYPT_CHACHA20_CTRLTYPE_ERROR for unknown options.
 */
int32_t CRYPT_CHACHA20_Ctrl(CRYPT_CHACHA20_Ctx *ctx, int32_t opt, void *val, uint32_t len)
{
    if (opt == CRYPT_CTRL_SET_IV) {
        return CRYPT_CHACHA20_SetNonce(ctx, val, len);
    }
    if (opt == CRYPT_CTRL_SET_COUNT) {
        return CRYPT_CHACHA20_SetCount(ctx, val, len);
    }
    BSL_ERR_PUSH_ERROR(CRYPT_CHACHA20_CTRLTYPE_ERROR);
    return CRYPT_CHACHA20_CTRLTYPE_ERROR;
}
/**
 * @brief Zeroize the entire ChaCha20 context (key, nonce, state, buffered
 *        keystream). Safe to call with NULL.
 * @param ctx [IN] ChaCha20 context; may be NULL
 */
void CRYPT_CHACHA20_Clean(CRYPT_CHACHA20_Ctx *ctx)
{
    if (ctx != NULL) {
        (void)memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
    }
}
#endif // HITLS_CRYPTO_CHACHA20
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/chacha20.c | C | unknown | 8,292 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CHACHA20_LOCAL_H
#define CHACHA20_LOCAL_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
#include "crypt_chacha20.h"
/**
 * @brief Generate the next 64-byte keystream block into ctx->last and
 *        advance the 32-bit block counter (state word 12).
 * @param ctx [IN/OUT] ChaCha20 context with key/nonce/counter already set
 */
void CHACHA20_Block(CRYPT_CHACHA20_Ctx *ctx);
/**
 * @brief XOR in with keystream for every complete 64-byte block; a trailing
 *        partial block is NOT consumed and is left to the caller.
 * @param ctx [IN/OUT] ChaCha20 context
 * @param in  [IN]  Input data
 * @param out [OUT] Output buffer, at least len bytes
 * @param len [IN]  Input length in bytes
 */
void CHACHA20_Update(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *in,
    uint8_t *out, uint32_t len);
#endif // HITLS_CRYPTO_CHACHA20
#endif // CHACHA20_LOCAL_H
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/chacha20_local.h | C | unknown | 885 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CHACHA20
#include "crypt_utils.h"
#include "chacha20_local.h"
/**
 * @brief XOR the input with keystream, one full 64-byte block at a time.
 *        Any tail shorter than one block is NOT consumed; the caller handles
 *        the remainder (see CRYPT_CHACHA20_Update).
 * @param ctx [IN/OUT] ChaCha20 context
 * @param in  [IN]  Input data
 * @param out [OUT] Output buffer, at least len bytes
 * @param len [IN]  Input length in bytes
 */
void CHACHA20_Update(CRYPT_CHACHA20_Ctx *ctx, const uint8_t *in,
    uint8_t *out, uint32_t len)
{
    const uint8_t *src = in;
    uint8_t *dst = out;
    uint32_t blocks = len / CHACHA20_STATEBYTES; // number of complete blocks
    while (blocks > 0) {
        CHACHA20_Block(ctx); // refresh keystream in ctx->last, bump the counter
        // XOR 64 bits at a time across the whole block
        DATA64_XOR(ctx->last.u, src, dst, CHACHA20_STATEBYTES);
        src += CHACHA20_STATEBYTES;
        dst += CHACHA20_STATEBYTES;
        blocks--;
    }
}
#endif // HITLS_CRYPTO_CHACHA20
| 2302_82127028/openHiTLS-examples_1508 | crypto/chacha20/src/chacha20block.c | C | unknown | 1,206 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CRYPT_CBC_MAC_H
#define CRYPT_CBC_MAC_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CBC_MAC
#include <stdint.h>
#include "crypt_types.h"
#include "crypt_cmac.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cpluscplus */
/* Opaque CBC-MAC context; the struct definition lives in cipher_mac_common.h. */
typedef struct CBC_MAC_Ctx CRYPT_CBC_MAC_Ctx;
/* CBC-MAC has no algorithm parameters, so no SetParam hook is provided. */
#define CRYPT_CBC_MAC_SetParam NULL
/**
* @brief Create a new CBC_MAC context.
* @param id [IN] CBC_MAC algorithm ID
* @return Pointer to the CBC_MAC context
*/
CRYPT_CBC_MAC_Ctx *CRYPT_CBC_MAC_NewCtx(CRYPT_MAC_AlgId id);
/**
* @brief Create a new CBC_MAC context with external library context.
* @param libCtx [in] External library context
* @param id [in] CBC_MAC algorithm ID
* @return Pointer to the CBC_MAC context
*/
CRYPT_CBC_MAC_Ctx *CRYPT_CBC_MAC_NewCtxEx(void *libCtx, CRYPT_MAC_AlgId id);
/**
* @brief Use the key passed by the user to initialize the algorithm context.
* @param ctx [IN] CBC_MAC context
* @param key [in] symmetric algorithm key
* @param len [in] Key length
* @param param [in] param
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CBC_MAC_Init(CRYPT_CBC_MAC_Ctx *ctx, const uint8_t *key, uint32_t len, void *param);
/**
* @brief Enter the data to be calculated and update the context.
* @param ctx [IN] CBC_MAC context
* @param *in [in] Pointer to the data to be calculated
* @param len [in] Length of the data to be calculated
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CBC_MAC_Update(CRYPT_CBC_MAC_Ctx *ctx, const uint8_t *in, uint32_t len);
/**
* @brief Output the cmac calculation result.
* @param ctx [IN] CBC_MAC context
* @param out [OUT] Output data. Sufficient memory must be allocated to store CBC_MAC results and cannot be null.
* @param len [IN/OUT] Output data length
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* @retval #CRYPT_EAL_BUFF_LEN_NOT_ENOUGH The length of the output buffer is insufficient.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CBC_MAC_Final(CRYPT_CBC_MAC_Ctx *ctx, uint8_t *out, uint32_t *len);
/**
 * @brief Re-initialize using the information retained in the ctx. Do not need to invoke the init again.
 * This function is equivalent to the combination of deinit and init interfaces.
 * @param ctx [IN] CBC_MAC context
 * @retval #CRYPT_SUCCESS Succeeded.
 * @retval #CRYPT_NULL_INPUT The input parameter is NULL.
 */
int32_t CRYPT_CBC_MAC_Reinit(CRYPT_CBC_MAC_Ctx *ctx);
/**
 * @brief Deinitialization function.
 * If calculation is required after this function is invoked, it needs to be initialized again.
 * @param ctx [IN] CBC_MAC context
 * @retval #CRYPT_SUCCESS Succeeded.
 * @retval #CRYPT_NULL_INPUT The input parameter is NULL.
 */
int32_t CRYPT_CBC_MAC_Deinit(CRYPT_CBC_MAC_Ctx *ctx);
/**
* @brief CBC_MAC control function to set some information
* @param ctx [IN] CBC_MAC context
* @param opt [IN] option
* @param val [IN] value
* @param len [IN] the length of value
* @return See crypt_errno.h.
*/
int32_t CRYPT_CBC_MAC_Ctrl(CRYPT_CBC_MAC_Ctx *ctx, uint32_t opt, void *val, uint32_t len);
/**
* @brief Free the CBC_MAC context.
* @param ctx [IN] CBC_MAC context
*/
void CRYPT_CBC_MAC_FreeCtx(CRYPT_CBC_MAC_Ctx *ctx);
#ifdef __cplusplus
}
#endif /* __cpluscplus */
#endif
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/cmac/include/crypt_cbc_mac.h | C | unknown | 4,037 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CRYPT_CMAC_H
#define CRYPT_CMAC_H
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CMAC
#include <stdint.h>
#include "crypt_types.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cpluscplus */
/* Opaque CMAC context; the struct definition lives in cipher_mac_common.h. */
typedef struct Cipher_MAC_Ctx CRYPT_CMAC_Ctx;
/* CMAC has no algorithm parameters, so no SetParam hook is provided. */
#define CRYPT_CMAC_SetParam NULL
/**
* @brief Create a new CMAC context.
* @param id [in] Symmetric encryption and decryption algorithm ID
* @return CMAC context
*/
CRYPT_CMAC_Ctx *CRYPT_CMAC_NewCtx(CRYPT_MAC_AlgId id);
/**
* @brief Create a new CMAC context with external library context.
* @param libCtx [in] External library context
* @param id [in] Symmetric encryption and decryption algorithm ID
* @return CMAC context
*/
CRYPT_CMAC_Ctx *CRYPT_CMAC_NewCtxEx(void *libCtx, CRYPT_MAC_AlgId id);
/**
* @brief Use the key passed by the user to initialize the algorithm context.
* @param ctx [IN] CMAC context
* @param key [in] symmetric algorithm key
* @param len [in] Key length
* @param param [in] param
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CMAC_Init(CRYPT_CMAC_Ctx *ctx, const uint8_t *key, uint32_t len, void *param);
/**
* @brief Enter the data to be calculated and update the context.
* @param ctx [IN] CMAC context
* @param *in [in] Pointer to the data to be calculated
* @param len [in] Length of the data to be calculated
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CMAC_Update(CRYPT_CMAC_Ctx *ctx, const uint8_t *in, uint32_t len);
/**
* @brief Output the cmac calculation result.
* @param ctx [IN] CMAC context
* @param out [OUT] Output data. Sufficient memory must be allocated to store CMAC results and cannot be null.
* @param len [IN/OUT] Output data length
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* @retval #CRYPT_EAL_BUFF_LEN_NOT_ENOUGH The length of the output buffer is insufficient.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CMAC_Final(CRYPT_CMAC_Ctx *ctx, uint8_t *out, uint32_t *len);
/**
* @brief Re-initialize using the information retained in the ctx. Do not need to invoke the init again.
* This function is equivalent to the combination of deinit and init interfaces.
* @param ctx [IN] CMAC context
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CMAC_Reinit(CRYPT_CMAC_Ctx *ctx);
/**
* @brief Deinitialization function.
* If calculation is required after this function is invoked, it needs to be initialized again.
* @param ctx [IN] CMAC context
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CMAC_Deinit(CRYPT_CMAC_Ctx *ctx);
/**
* @brief Control function for CMAC.
* @param ctx [IN] CMAC context
* @param opt [IN] Control option
* @param val [IN]/[OUT] Control value
* @param len [IN] Control value length
* @retval #CRYPT_SUCCESS Succeeded.
* @retval #CRYPT_NULL_INPUT The input parameter is NULL.
* For other error codes, see crypt_errno.h.
*/
int32_t CRYPT_CMAC_Ctrl(CRYPT_CMAC_Ctx *ctx, uint32_t opt, void *val, uint32_t len);
/**
* @brief Free the CMAC context.
* @param ctx [IN] CMAC context
*/
void CRYPT_CMAC_FreeCtx(CRYPT_CMAC_Ctx *ctx);
#ifdef __cplusplus
}
#endif /* __cpluscplus */
#endif
#endif | 2302_82127028/openHiTLS-examples_1508 | crypto/cmac/include/crypt_cmac.h | C | unknown | 4,258 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CBC_MAC
#include <stdint.h>
#include "bsl_sal.h"
#include "crypt_types.h"
#include "crypt_utils.h"
#include "bsl_err_internal.h"
#include "cipher_mac_common.h"
#include "crypt_errno.h"
#include "crypt_cbc_mac.h"
#include "eal_mac_local.h"
/**
 * @brief Allocate a CBC-MAC context bound to the block cipher identified by id.
 *        The padding type starts out unset (CRYPT_PADDING_MAX_COUNT) and must
 *        be configured through CRYPT_CBC_MAC_Ctrl before Update will accept data.
 * @param id [IN] CBC_MAC algorithm ID
 * @return New context, or NULL on method-lookup or allocation failure.
 */
CRYPT_CBC_MAC_Ctx *CRYPT_CBC_MAC_NewCtx(CRYPT_MAC_AlgId id)
{
    EAL_MacDepMethod method = {0};
    if (EAL_MacFindDepMethod(id, NULL, NULL, &method, NULL, false) != CRYPT_SUCCESS) {
        return NULL;
    }
    CRYPT_CBC_MAC_Ctx *ctx = BSL_SAL_Calloc(1, sizeof(*ctx));
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return NULL;
    }
    if (CipherMacInitCtx(&ctx->common, method.method.sym) != CRYPT_SUCCESS) {
        BSL_SAL_Free(ctx);
        return NULL;
    }
    ctx->paddingType = CRYPT_PADDING_MAX_COUNT; // sentinel: padding not configured yet
    return ctx;
}
/**
 * @brief Same as CRYPT_CBC_MAC_NewCtx; libCtx is accepted for interface
 *        symmetry but currently unused.
 */
CRYPT_CBC_MAC_Ctx *CRYPT_CBC_MAC_NewCtxEx(void *libCtx, CRYPT_MAC_AlgId id)
{
    (void)libCtx; // reserved for provider-style library contexts
    return CRYPT_CBC_MAC_NewCtx(id);
}
/**
 * @brief Key the underlying block cipher and reset the MAC state.
 * @param ctx   [IN] CBC_MAC context
 * @param key   [IN] Symmetric key
 * @param len   [IN] Key length
 * @param param [IN] Unused (no per-algorithm parameters)
 * @retval #CRYPT_SUCCESS / #CRYPT_NULL_INPUT or a cipher key-schedule error.
 */
int32_t CRYPT_CBC_MAC_Init(CRYPT_CBC_MAC_Ctx *ctx, const uint8_t *key, uint32_t len, void *param)
{
    (void)param;
    if (ctx != NULL) {
        return CipherMacInit(&ctx->common, key, len);
    }
    BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
    return CRYPT_NULL_INPUT;
}
/**
 * @brief Absorb message data into the CBC-MAC computation.
 *        Data is rejected until a padding type has been selected via Ctrl.
 * @retval #CRYPT_SUCCESS / #CRYPT_NULL_INPUT / #CRYPT_CBC_MAC_PADDING_NOT_SET
 */
int32_t CRYPT_CBC_MAC_Update(CRYPT_CBC_MAC_Ctx *ctx, const uint8_t *in, uint32_t len)
{
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (ctx->paddingType != CRYPT_PADDING_MAX_COUNT) {
        return CipherMacUpdate(&ctx->common, in, len);
    }
    // Padding mode was never configured; refuse to absorb data.
    BSL_ERR_PUSH_ERROR(CRYPT_CBC_MAC_PADDING_NOT_SET);
    return CRYPT_CBC_MAC_PADDING_NOT_SET;
}
/* Pad the buffered partial block in ctx->common.left up to the cipher block
 * size. Only zero padding is supported; a full buffer is left untouched. */
static int32_t CbcMacPadding(CRYPT_CBC_MAC_Ctx *ctx)
{
    if (ctx->paddingType != CRYPT_PADDING_ZEROS) {
        BSL_ERR_PUSH_ERROR(CRYPT_CBC_MAC_PADDING_NOT_SUPPORT);
        return CRYPT_CBC_MAC_PADDING_NOT_SUPPORT;
    }
    uint32_t blockSize = ctx->common.method->blockSize;
    while (ctx->common.len < blockSize) {
        ctx->common.left[ctx->common.len] = 0;
        ctx->common.len++;
    }
    return CRYPT_SUCCESS;
}
/**
 * @brief Pad the final block, chain it into the running CBC value and emit
 *        the MAC (exactly one cipher block).
 * @param ctx [IN]     CBC_MAC context
 * @param out [OUT]    Receives blockSize MAC bytes
 * @param len [IN/OUT] In: capacity of out; out: number of bytes written
 * @retval #CRYPT_SUCCESS / #CRYPT_NULL_INPUT /
 *         #CRYPT_CBC_MAC_OUT_BUFF_LEN_NOT_ENOUGH or a padding/cipher error.
 */
int32_t CRYPT_CBC_MAC_Final(CRYPT_CBC_MAC_Ctx *ctx, uint8_t *out, uint32_t *len)
{
    if (ctx == NULL || ctx->common.method == NULL || len == NULL || out == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    uint32_t blockSize = ctx->common.method->blockSize;
    if (*len < blockSize) {
        BSL_ERR_PUSH_ERROR(CRYPT_CBC_MAC_OUT_BUFF_LEN_NOT_ENOUGH);
        return CRYPT_CBC_MAC_OUT_BUFF_LEN_NOT_ENOUGH;
    }
    int32_t ret = CbcMacPadding(ctx);
    if (ret != CRYPT_SUCCESS) {
        return ret;
    }
    // Chain the padded block into the running CBC value, then encrypt once more.
    DATA_XOR(ctx->common.left, ctx->common.data, ctx->common.left, blockSize);
    ret = ctx->common.method->encryptBlock(ctx->common.key, ctx->common.left, out, blockSize);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    *len = blockSize;
    return CRYPT_SUCCESS;
}
/**
 * @brief Restart the MAC computation with the already-scheduled key
 *        (equivalent to deinit + init, without re-keying the cipher).
 */
int32_t CRYPT_CBC_MAC_Reinit(CRYPT_CBC_MAC_Ctx *ctx)
{
    if (ctx != NULL) {
        return CipherMacReinit(&ctx->common);
    }
    BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
    return CRYPT_NULL_INPUT;
}
/**
 * @brief Scrub the key schedule and chaining state; Init must be called
 *        again before the context can be reused.
 */
int32_t CRYPT_CBC_MAC_Deinit(CRYPT_CBC_MAC_Ctx *ctx)
{
    if (ctx != NULL) {
        return CipherMacDeinit(&ctx->common);
    }
    BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
    return CRYPT_NULL_INPUT;
}
/**
 * @brief Control entry: set the padding type or query the MAC length.
 * @param ctx [IN] CBC_MAC context
 * @param opt [IN] CRYPT_CTRL_SET_CBC_MAC_PADDING or CRYPT_CTRL_GET_MACLEN
 * @param val [IN/OUT] Option value
 * @param len [IN] Length of val
 * @return See crypt_errno.h.
 */
int32_t CRYPT_CBC_MAC_Ctrl(CRYPT_CBC_MAC_Ctx *ctx, uint32_t opt, void *val, uint32_t len)
{
    if (ctx == NULL || val == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (opt == CRYPT_CTRL_SET_CBC_MAC_PADDING) {
        if (len != sizeof(CRYPT_PaddingType)) {
            BSL_ERR_PUSH_ERROR(CRYPT_CBC_MAC_ERR_CTRL_LEN);
            return CRYPT_CBC_MAC_ERR_CTRL_LEN;
        }
        ctx->paddingType = *(CRYPT_PaddingType *)val;
        return CRYPT_SUCCESS;
    }
    if (opt == CRYPT_CTRL_GET_MACLEN) {
        return CipherMacGetMacLen(&ctx->common, val, len);
    }
    BSL_ERR_PUSH_ERROR(CRYPT_CBC_MAC_ERR_UNSUPPORTED_CTRL_OPTION);
    return CRYPT_CBC_MAC_ERR_UNSUPPORTED_CTRL_OPTION;
}
/**
 * @brief Release a CBC-MAC context (key material is scrubbed first).
 * @param ctx [IN] CBC_MAC context; NULL pushes CRYPT_NULL_INPUT and returns.
 */
void CRYPT_CBC_MAC_FreeCtx(CRYPT_CBC_MAC_Ctx *ctx)
{
    if (ctx != NULL) {
        CipherMacDeinitCtx(&ctx->common);
        BSL_SAL_Free(ctx);
        return;
    }
    BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
}
#endif
| 2302_82127028/openHiTLS-examples_1508 | crypto/cmac/src/cbc_mac.c | C | unknown | 5,409 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_CBC_MAC) || defined(HITLS_CRYPTO_CMAC)
#include <stdlib.h>
#include "securec.h"
#include "bsl_sal.h"
#include "crypt_utils.h"
#include "crypt_errno.h"
#include "bsl_err_internal.h"
#include "crypt_local_types.h"
#include "cipher_mac_common.h"
/**
 * @brief Bind a symmetric-cipher method table to the common MAC context and
 *        allocate the cipher's keyed state (method->ctxSize bytes).
 * @retval #CRYPT_SUCCESS / #CRYPT_NULL_INPUT / #CRYPT_MEM_ALLOC_FAIL
 */
int32_t CipherMacInitCtx(Cipher_MAC_Common_Ctx *ctx, const EAL_SymMethod *method)
{
    if (ctx == NULL || method == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    void *cipherCtx = BSL_SAL_Calloc(1u, method->ctxSize);
    if (cipherCtx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return CRYPT_MEM_ALLOC_FAIL;
    }
    ctx->key = cipherCtx;
    ctx->method = method;
    return CRYPT_SUCCESS;
}
/**
 * @brief Zeroize and free the keyed cipher state owned by the context.
 */
void CipherMacDeinitCtx(Cipher_MAC_Common_Ctx *ctx)
{
    if (ctx == NULL || ctx->method == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return;
    }
    // Wipe key material before releasing it so secrets do not linger on the heap.
    BSL_SAL_CleanseData(ctx->key, ctx->method->ctxSize);
    BSL_SAL_FREE(ctx->key);
}
/**
 * @brief Schedule the MAC key into the cipher and clear the chaining state.
 *        A NULL key is allowed only together with len == 0.
 * @retval #CRYPT_SUCCESS / #CRYPT_NULL_INPUT or a key-schedule error.
 */
int32_t CipherMacInit(Cipher_MAC_Common_Ctx *ctx, const uint8_t *key, uint32_t len)
{
    if (ctx == NULL || ctx->method == NULL || (key == NULL && len != 0)) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    int32_t ret = ctx->method->setEncryptKey(ctx->key, key, len);
    if (ret == CRYPT_SUCCESS) {
        // Fresh computation: zero the chaining value and drop buffered bytes.
        (void)memset_s(ctx->data, CIPHER_MAC_MAXBLOCKSIZE, 0, CIPHER_MAC_MAXBLOCKSIZE);
        ctx->len = 0;
        return CRYPT_SUCCESS;
    }
    BSL_ERR_PUSH_ERROR(ret);
    return ret;
}
/**
 * @brief Absorb message bytes into the CBC chain.
 *        Invariant: after any successful non-empty update, ctx->left holds
 *        between 1 and blockSize buffered bytes (note the strict '>' in the
 *        bulk loop). The final block is deliberately withheld so that Final
 *        can pad it (CBC-MAC) or fold a subkey into it (CMAC).
 * @param ctx [IN/OUT] Common cipher-MAC context
 * @param in  [IN] Data to absorb (may be NULL only when len == 0)
 * @param len [IN] Length of in
 * @retval #CRYPT_SUCCESS / #CRYPT_NULL_INPUT / #CRYPT_CMAC_INPUT_OVERFLOW
 *         or a cipher error.
 */
int32_t CipherMacUpdate(Cipher_MAC_Common_Ctx *ctx, const uint8_t *in, uint32_t len)
{
    if (ctx == NULL || ctx->method == NULL || (in == NULL && len != 0)) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    const EAL_SymMethod *method = ctx->method;
    int32_t ret;
    uint32_t blockSize = method->blockSize;
    const uint8_t *inTmp = in;
    uint32_t lenTmp = len;
    if (ctx->len > 0) {
        if (ctx->len > (UINT32_MAX - lenTmp)) { // ctx->len + lenTmp would wrap
            BSL_ERR_PUSH_ERROR(CRYPT_CMAC_INPUT_OVERFLOW);
            return CRYPT_CMAC_INPUT_OVERFLOW;
        }
        // Top up the buffered partial block, but never beyond one full block.
        uint32_t end = (ctx->len + lenTmp) > (blockSize) ? (blockSize) : (ctx->len + lenTmp);
        for (uint32_t i = ctx->len; i < end; i++) {
            ctx->left[i] = (*inTmp);
            inTmp++;
        }
        lenTmp -= (end - ctx->len);
        if (lenTmp == 0) {
            // All input consumed; keep the (possibly full) block buffered for Final.
            ctx->len = end;
            return CRYPT_SUCCESS;
        }
        // More input follows, so the buffered block is not the last one: chain it.
        DATA_XOR(ctx->left, ctx->data, ctx->left, blockSize);
        ret = method->encryptBlock(ctx->key, ctx->left, ctx->data, blockSize);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
    }
    // Strict '>' keeps the final full block out of the chain (see invariant above).
    while (lenTmp > blockSize) {
        DATA_XOR(inTmp, ctx->data, ctx->left, blockSize);
        ret = method->encryptBlock(ctx->key, ctx->left, ctx->data, blockSize);
        if (ret != CRYPT_SUCCESS) {
            BSL_ERR_PUSH_ERROR(ret);
            return ret;
        }
        lenTmp -= blockSize;
        inTmp += blockSize;
    }
    // Buffer the remaining 1..blockSize bytes for the next Update/Final call.
    for (uint32_t i = 0; i < lenTmp; i++) {
        ctx->left[i] = inTmp[i];
    }
    ctx->len = lenTmp;
    return CRYPT_SUCCESS;
}
/**
 * @brief Clear the chaining value and buffered bytes while keeping the
 *        scheduled key, so a new MAC can be computed without calling Init.
 */
int32_t CipherMacReinit(Cipher_MAC_Common_Ctx *ctx)
{
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    (void)memset_s(ctx->data, CIPHER_MAC_MAXBLOCKSIZE, 0, CIPHER_MAC_MAXBLOCKSIZE);
    ctx->len = 0;
    return CRYPT_SUCCESS;
}
/**
 * @brief Scrub everything that may hold secret material: the key schedule,
 *        the chaining value and any buffered plaintext bytes.
 */
int32_t CipherMacDeinit(Cipher_MAC_Common_Ctx *ctx)
{
    if (ctx == NULL || ctx->method == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    BSL_SAL_CleanseData(ctx->key, ctx->method->ctxSize);
    (void)memset_s(ctx->data, CIPHER_MAC_MAXBLOCKSIZE, 0, CIPHER_MAC_MAXBLOCKSIZE);
    (void)memset_s(ctx->left, CIPHER_MAC_MAXBLOCKSIZE, 0, CIPHER_MAC_MAXBLOCKSIZE);
    ctx->len = 0;
    return CRYPT_SUCCESS;
}
/**
 * @brief Report the MAC length (equal to the cipher block size) through val.
 * @param val [OUT] Must point to a uint32_t (len == sizeof(uint32_t)).
 */
int32_t CipherMacGetMacLen(const Cipher_MAC_Common_Ctx *ctx, void *val, uint32_t len)
{
    if (ctx == NULL || ctx->method == NULL || val == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (len != sizeof(uint32_t)) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    *(uint32_t *)val = ctx->method->blockSize;
    return CRYPT_SUCCESS;
}
#endif // #if defined(HITLS_CRYPTO_CBC_MAC) || defined(HITLS_CRYPTO_CMAC)
| 2302_82127028/openHiTLS-examples_1508 | crypto/cmac/src/cipher_mac_common.c | C | unknown | 5,210 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#ifndef CIPHER_MAC_COMMON_H
#define CIPHER_MAC_COMMON_H
#include "hitls_build.h"
#if defined(HITLS_CRYPTO_CBC_MAC) || defined(HITLS_CRYPTO_CMAC)
#include <stdint.h>
#include "crypt_local_types.h"
#include "crypt_cmac.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cpluscplus */
/* Largest supported cipher block in bytes (AES: 16; DES/TDES use 8). */
#define CIPHER_MAC_MAXBLOCKSIZE 16
/* State shared by CMAC and CBC-MAC; maintained by cipher_mac_common.c. */
struct Cipher_MAC_Ctx {
    const EAL_SymMethod *method; // dispatch table of the underlying block cipher
    void *key; // keyed cipher state, method->ctxSize bytes, owned by this ctx
    /* Stores the intermediate process data of CBC_MAC. The length is the block size. */
    uint8_t data[CIPHER_MAC_MAXBLOCKSIZE]; // running CBC chaining value
    uint8_t left[CIPHER_MAC_MAXBLOCKSIZE]; // buffered input not yet chained (up to one block)
    uint32_t len; /* Length of a non-integral data block */
};
typedef struct Cipher_MAC_Ctx Cipher_MAC_Common_Ctx;
#ifdef HITLS_CRYPTO_CBC_MAC
/* CBC-MAC extends the common state with a user-selected padding mode. */
struct CBC_MAC_Ctx {
    Cipher_MAC_Common_Ctx common;
    CRYPT_PaddingType paddingType; // CRYPT_PADDING_MAX_COUNT means "not set"
};
#endif
int32_t CipherMacInitCtx(Cipher_MAC_Common_Ctx *ctx, const EAL_SymMethod *method);
void CipherMacDeinitCtx(Cipher_MAC_Common_Ctx *ctx);
int32_t CipherMacInit(Cipher_MAC_Common_Ctx *ctx, const uint8_t *key, uint32_t len);
int32_t CipherMacUpdate(Cipher_MAC_Common_Ctx *ctx, const uint8_t *in, uint32_t len);
int32_t CipherMacReinit(Cipher_MAC_Common_Ctx *ctx);
int32_t CipherMacDeinit(Cipher_MAC_Common_Ctx *ctx);
int32_t CipherMacGetMacLen(const Cipher_MAC_Common_Ctx *ctx, void *val, uint32_t len);
#ifdef __cplusplus
}
#endif /* __cpluscplus */
#endif // #if defined(HITLS_CRYPTO_CBC_MAC) || defined(HITLS_CRYPTO_CMAC)
#endif // CIPHER_MAC_COMMON_H
| 2302_82127028/openHiTLS-examples_1508 | crypto/cmac/src/cipher_mac_common.h | C | unknown | 2,084 |
/*
* This file is part of the openHiTLS project.
*
* openHiTLS is licensed under the Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
#include "hitls_build.h"
#ifdef HITLS_CRYPTO_CMAC
#include <stdlib.h>
#include "bsl_sal.h"
#include "crypt_errno.h"
#include "crypt_utils.h"
#include "bsl_err_internal.h"
#include "cipher_mac_common.h"
#include "crypt_cmac.h"
#include "eal_mac_local.h"
/**
 * @brief Allocate a CMAC context bound to the block cipher identified by id.
 * @param id [IN] Symmetric algorithm ID
 * @return New context, or NULL on method-lookup or allocation failure.
 */
CRYPT_CMAC_Ctx *CRYPT_CMAC_NewCtx(CRYPT_MAC_AlgId id)
{
    EAL_MacDepMethod method = {0};
    if (EAL_MacFindDepMethod(id, NULL, NULL, &method, NULL, false) != CRYPT_SUCCESS) {
        return NULL;
    }
    CRYPT_CMAC_Ctx *ctx = BSL_SAL_Calloc(1, sizeof(*ctx));
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_MEM_ALLOC_FAIL);
        return NULL;
    }
    if (CipherMacInitCtx(ctx, method.method.sym) != CRYPT_SUCCESS) {
        BSL_SAL_Free(ctx);
        return NULL;
    }
    return ctx;
}
/**
 * @brief Same as CRYPT_CMAC_NewCtx; libCtx is accepted for interface symmetry
 *        but currently unused.
 */
CRYPT_CMAC_Ctx *CRYPT_CMAC_NewCtxEx(void *libCtx, CRYPT_MAC_AlgId id)
{
    (void)libCtx; // reserved for provider-style library contexts
    return CRYPT_CMAC_NewCtx(id);
}
/**
 * @brief Key the underlying cipher and reset the CMAC state.
 */
int32_t CRYPT_CMAC_Init(CRYPT_CMAC_Ctx *ctx, const uint8_t *key, uint32_t len, void *param)
{
    (void)param; // CMAC has no per-algorithm parameters
    Cipher_MAC_Common_Ctx *common = (Cipher_MAC_Common_Ctx *)ctx;
    return CipherMacInit(common, key, len);
}
/**
 * @brief Absorb message data into the CMAC computation.
 */
int32_t CRYPT_CMAC_Update(CRYPT_CMAC_Ctx *ctx, const uint8_t *in, uint32_t len)
{
    Cipher_MAC_Common_Ctx *common = (Cipher_MAC_Common_Ctx *)ctx;
    return CipherMacUpdate(common, in, len);
}
/**
 * @brief Left-shift a big-endian byte string by one bit: out = in << 1.
 *        Used for CMAC subkey derivation (K1/K2, RFC 4493 / NIST SP 800-38B);
 *        the bit shifted out of byte 0 is discarded.
 * @param in  [IN]  Input bytes, most significant byte first; must not alias out
 * @param len [IN]  Length in bytes (callers pass the cipher block size, 8 or 16)
 * @param out [OUT] Output buffer of at least len bytes
 */
static inline void LeftShiftOneBit(const uint8_t *in, uint32_t len, uint8_t *out)
{
    // Guard: the original do-while decremented an unsigned index past 0 for
    // len <= 1, wrapping to UINT32_MAX and reading out of bounds.
    if (len == 0) {
        return;
    }
    uint32_t i = len - 1;
    out[i] = (uint8_t)(in[i] << 1);
    while (i != 0) {
        i--;
        // 7 is used to obtain the most significant bit of the 8-bit data.
        out[i] = (uint8_t)((in[i] << 1) | (in[i + 1] >> 7));
    }
}
/*
 * RFC 4493 / NIST SP 800-38B finalization step: derive subkey K1 (and K2 when
 * the last block is partial), pad if needed, and XOR the subkey into ctx->left.
 * The caller (CRYPT_CMAC_Final) then chains and encrypts the result.
 */
static void CMAC_Final(CRYPT_CMAC_Ctx *ctx)
{
    const uint8_t z[CIPHER_MAC_MAXBLOCKSIZE] = {0};
    uint8_t rb;
    uint8_t l[CIPHER_MAC_MAXBLOCKSIZE];
    uint8_t k1[CIPHER_MAC_MAXBLOCKSIZE];
    const EAL_SymMethod *method = ctx->method;
    uint32_t blockSize = method->blockSize;
    int32_t ret;
    // L = E_K(0^b), the starting point for subkey derivation.
    ret = method->encryptBlock(ctx->key, z, l, blockSize);
    if (ret != CRYPT_SUCCESS) {
        // NOTE(review): this helper returns void, so a cipher failure only
        // pushes an error and returns with no subkey folded into ctx->left;
        // the caller cannot detect it here — confirm this is acceptable.
        BSL_ERR_PUSH_ERROR(ret);
        return;
    }
    // K1 = L << 1, conditionally XORed with Rb below when MSB(L) is set.
    LeftShiftOneBit(l, blockSize, k1);
    if (blockSize == CIPHER_MAC_MAXBLOCKSIZE) {
        rb = 0x87; /* When the AES algorithm is used and the blocksize is 128 bits, rb uses 0x87. */
    } else {
        rb = 0x1B; /* When the DES and TDES algorithms are used and blocksize is 64 bits, rb uses 0x1B. */
    }
    if ((l[0] & 0x80) != 0) {
        k1[blockSize - 1] ^= rb;
    }
    uint32_t length = ctx->len;
    if (length == blockSize) { // When the message length is an integer multiple of blockSize, use K1
        DATA_XOR(ctx->left, k1, ctx->left, blockSize);
    } else { // The message length is not an integer multiple of blockSize. Use K2 after padding.
        /* padding */
        ctx->left[length++] = 0x80; // 0x80 indicates that the first bit of the data is added with 1.
        while (length < blockSize) {
            ctx->left[length++] = 0;
        }
        uint8_t k2[CIPHER_MAC_MAXBLOCKSIZE];
        // K2 = K1 << 1, with the same conditional Rb reduction.
        LeftShiftOneBit(k1, blockSize, k2);
        if ((k1[0] & 0x80) != 0) {
            k2[blockSize - 1] ^= rb;
        }
        DATA_XOR(ctx->left, k2, ctx->left, blockSize);
        ctx->len = blockSize;
    }
}
/**
 * @brief Apply the RFC 4493 subkey/padding step to the buffered last block,
 *        chain it and emit the MAC (exactly one cipher block).
 * @param ctx [IN]     CMAC context
 * @param out [OUT]    Receives blockSize MAC bytes
 * @param len [IN/OUT] In: capacity of out; out: number of bytes written
 * @retval #CRYPT_SUCCESS / #CRYPT_NULL_INPUT /
 *         #CRYPT_CMAC_OUT_BUFF_LEN_NOT_ENOUGH or a cipher error.
 */
int32_t CRYPT_CMAC_Final(CRYPT_CMAC_Ctx *ctx, uint8_t *out, uint32_t *len)
{
    if (ctx == NULL || ctx->method == NULL || len == NULL || out == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    uint32_t blockSize = ctx->method->blockSize;
    if (*len < blockSize) {
        BSL_ERR_PUSH_ERROR(CRYPT_CMAC_OUT_BUFF_LEN_NOT_ENOUGH);
        return CRYPT_CMAC_OUT_BUFF_LEN_NOT_ENOUGH;
    }
    CMAC_Final(ctx); // fold subkey K1/K2 (and padding if needed) into ctx->left
    DATA_XOR(ctx->left, ctx->data, ctx->left, blockSize);
    int32_t ret = ctx->method->encryptBlock(ctx->key, ctx->left, out, blockSize);
    if (ret != CRYPT_SUCCESS) {
        BSL_ERR_PUSH_ERROR(ret);
        return ret;
    }
    *len = blockSize;
    return CRYPT_SUCCESS;
}
/**
 * @brief Restart the MAC computation with the already-scheduled key.
 */
int32_t CRYPT_CMAC_Reinit(CRYPT_CMAC_Ctx *ctx)
{
    Cipher_MAC_Common_Ctx *common = (Cipher_MAC_Common_Ctx *)ctx;
    return CipherMacReinit(common);
}
/**
 * @brief Scrub the key schedule and chaining state; Init is required before reuse.
 */
int32_t CRYPT_CMAC_Deinit(CRYPT_CMAC_Ctx *ctx)
{
    Cipher_MAC_Common_Ctx *common = (Cipher_MAC_Common_Ctx *)ctx;
    return CipherMacDeinit(common);
}
/**
 * @brief Control entry for CMAC; only CRYPT_CTRL_GET_MACLEN is supported.
 * @retval #CRYPT_SUCCESS / #CRYPT_NULL_INPUT /
 *         #CRYPT_CMAC_ERR_UNSUPPORTED_CTRL_OPTION
 */
int32_t CRYPT_CMAC_Ctrl(CRYPT_CMAC_Ctx *ctx, uint32_t opt, void *val, uint32_t len)
{
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return CRYPT_NULL_INPUT;
    }
    if (opt == CRYPT_CTRL_GET_MACLEN) {
        return CipherMacGetMacLen(ctx, val, len);
    }
    BSL_ERR_PUSH_ERROR(CRYPT_CMAC_ERR_UNSUPPORTED_CTRL_OPTION);
    return CRYPT_CMAC_ERR_UNSUPPORTED_CTRL_OPTION;
}
/**
 * @brief Free the CMAC context: scrub the key material, release the inner
 *        cipher state and the context itself.
 *        An explicit NULL guard is added for consistency with
 *        CRYPT_CBC_MAC_FreeCtx (previously NULL was only caught inside
 *        CipherMacDeinitCtx, and BSL_SAL_Free was still invoked on NULL).
 * @param ctx [IN] CMAC context; NULL pushes CRYPT_NULL_INPUT and returns.
 */
void CRYPT_CMAC_FreeCtx(CRYPT_CMAC_Ctx *ctx)
{
    if (ctx == NULL) {
        BSL_ERR_PUSH_ERROR(CRYPT_NULL_INPUT);
        return;
    }
    CipherMacDeinitCtx(ctx);
    BSL_SAL_Free(ctx);
}
#endif /* HITLS_CRYPTO_CMAC */
| 2302_82127028/openHiTLS-examples_1508 | crypto/cmac/src/cmac.c | C | unknown | 5,318 |