| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_DEBUG_H_
#define _PHL_DEBUG_H_
/* phl log level */
enum {
_PHL_NONE_ = 0,
_PHL_ALWAYS_ = 1,
_PHL_ERR_ = 2,
_PHL_WARNING_ = 3,
_PHL_INFO_ = 4,
_PHL_DEBUG_ = 5,
_PHL_MAX_ = 6
};
#define PHL_PREFIX "PHL: "
#define HALPS_PREFIX "HALPS:"
#define PHL_DBG_OUTBUF(max_buff_len, used_len, buff_addr, remain_len, fmt, ...)\
do { \
u32 *used_len_tmp = &(used_len); \
if (*used_len_tmp < max_buff_len) \
*used_len_tmp += _os_snprintf(buff_addr, remain_len, fmt, ##__VA_ARGS__);\
} while (0)
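/*
 * Illustrative usage sketch (not part of the original header), assuming
 * _os_snprintf() behaves like snprintf(); the function, buffer and variable
 * names below are hypothetical. It shows how PHL_DBG_OUTBUF accumulates
 * formatted text into a fixed-size dump buffer while 'used' tracks the
 * written length.
 */
#if 0
static void phl_dbg_outbuf_example(void)
{
	char dump_buf[256];
	u32 used = 0;

	PHL_DBG_OUTBUF(sizeof(dump_buf), used, dump_buf + used,
		       sizeof(dump_buf) - used, "macid=%d\n", 5);
	PHL_DBG_OUTBUF(sizeof(dump_buf), used, dump_buf + used,
		       sizeof(dump_buf) - used, "aid=%d\n", 1);
	/* dump_buf now holds both lines; 'used' is their total length */
}
#endif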
#ifdef CONFIG_RTW_DEBUG
/*Define the tracing components*/
#define COMP_PHL_DBG BIT0 /* For function call tracing. */
#define COMP_PHL_RECV BIT1
#define COMP_PHL_XMIT BIT2
#define COMP_PHL_MAC BIT3
#define COMP_PHL_SOUND BIT4
#define COMP_PHL_WOW BIT5
#define COMP_PHL_TRIG BIT6
#define COMP_PHL_PKTOFLD BIT7
#define COMP_PHL_FSM BIT8
#define COMP_PHL_PS BIT9
#define COMP_PHL_PSTS BIT10
#define COMP_PHL_BB BIT11
#define COMP_PHL_RF BIT12
#define COMP_PHL_LED BIT13
#define COMP_PHL_MCC BIT14
#define COMP_PHL_P2PPS BIT15
#define COMP_PHL_ECSA BIT16
#define COMP_PHL_CMDDISP BIT17
#define COMP_PHL_BTC BIT18
#define COMP_PHL_TWT BIT19
extern u32 phl_log_components;
extern u8 phl_log_level;
#define DEBUG_MAX_NAME_LEN 50
struct dbg_alloc_buf {
_os_list list;
u8 file_name[DEBUG_MAX_NAME_LEN];
u8 func_name[DEBUG_MAX_NAME_LEN];
u32 line_num;
u32 buf_size;
u8 *buf_ptr;
};
struct dbg_mem_ctx {
_os_list alloc_buf_list;
_os_lock alloc_buf_list_lock;
u32 alloc_buf_cnt;
};
#undef PHL_TRACE
#define PHL_TRACE(comp, level, fmt, ...) \
do {\
if(((comp) & phl_log_components) && (level <= phl_log_level)) {\
_os_dbgdump(PHL_PREFIX fmt, ##__VA_ARGS__);\
} \
} while (0)
#undef PHL_DATA
#define PHL_DATA(comp, level, fmt, ...) \
do {\
if(((comp) & phl_log_components) && (level <= phl_log_level)) {\
_os_dbgdump(KERN_CONT fmt, ##__VA_ARGS__);\
} \
} while (0)
#undef PHL_ASSERT
#define PHL_ASSERT(fmt, ...) \
do {\
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "ERROR " fmt, ##__VA_ARGS__);\
_os_assert(true); \
} while (0)
#undef PHL_PRINT
#define PHL_PRINT(fmt, ...) \
do {\
PHL_TRACE(COMP_PHL_DBG, _PHL_ALWAYS_, fmt, ##__VA_ARGS__);\
} while (0)
#undef PHL_ERR
#define PHL_ERR(fmt, ...) \
do {\
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "ERROR " fmt, ##__VA_ARGS__);\
} while (0)
#undef PHL_WARN
#define PHL_WARN(fmt, ...) \
do {\
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "WARN " fmt, ##__VA_ARGS__);\
} while (0)
#undef PHL_INFO
#define PHL_INFO(fmt, ...) \
do {\
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, fmt, ##__VA_ARGS__);\
} while (0)
#undef PHL_DBG
#define PHL_DBG(fmt, ...) \
do {\
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, fmt, ##__VA_ARGS__);\
} while (0)
#define FUNCIN() PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "Enter %s\n", __FUNCTION__)
#define FUNCOUT() PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "Leave %s\n", __FUNCTION__)
#define FUNCIN_WSTS(_sts) PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "Enter with 0x%08X %s\n", _sts, __FUNCTION__)
#define FUNCOUT_WSTS(_sts) PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "Leave with 0x%08X %s\n", _sts, __FUNCTION__)
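/*
 * Illustrative usage sketch (not part of the original header): a typical call
 * pattern for the trace macros above. Output reaches _os_dbgdump() only when
 * the component bit is set in phl_log_components and the level does not
 * exceed phl_log_level. The function and parameter below are hypothetical.
 */
#if 0
static void phl_trace_example(u32 pkt_len)
{
	FUNCIN();
	PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "rx pkt_len=%d\n", pkt_len);
	if (pkt_len == 0)
		PHL_ERR("zero-length packet\n");
	FUNCOUT();
}
#endif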
void debug_dump_data(u8 *buf, u32 buf_len, const char *prefix);
void rt_alloc_dbg_buf(void *phl, u8 *buf_ptr, u32 buf_size,
const u8 *file_name, u32 line_num, const u8 *func_name);
void rt_free_dbg_buf(void *phl, u8 *buf_ptr, u32 buf_size,
const u8 *file_name, u32 line_num, const u8 *func_name);
void rt_mem_dbg_init(void *phl);
void rt_mem_dbg_deinit(void *phl);
u32 rtw_phl_dbg_ctrl_comp(u8 ctrl, u8 comp_bit);
void debug_dump_mac_address(u8 *mac_addr);
#define phl_ops_error_msg(ops_fun) \
PHL_ERR("### %s - Please hook phl_hci_ops.%s ###\n", __func__, ops_fun)
#define hal_error_msg(ops_fun) \
PHL_PRINT("### %s - Error : Please hook hal_ops.%s ###\n", __FUNCTION__, ops_fun)
#else /* CONFIG_RTW_DEBUG */
#define PHL_TRACE(comp, level, fmt, ...)
#define PHL_PRINT(fmt, ...)
#define PHL_ERR(fmt, ...)
#define PHL_WARN(fmt, ...)
#define PHL_INFO(fmt, ...)
#define PHL_DBG(fmt, ...)
#define PHL_DATA(comp, level, fmt, ...)
#define PHL_ASSERT(fmt, ...)
#define FUNCIN()
#define FUNCOUT()
#define FUNCIN_WSTS(_sts)
#define FUNCOUT_WSTS(_sts)
#define debug_dump_data(_buf, _buf_len, _prefix)
#define debug_dump_mac_address(_mac_addr)
#define rt_alloc_dbg_buf(_phl, _buf, _buf_size, _file_name, _line_num, \
_func_name)
#define rt_free_dbg_buf(_phl, _buf, _buf_size, _file_name, _line_num, \
_func_name)
#define rt_mem_dbg_init(_phl)
#define rt_mem_dbg_deinit(_phl)
#define rtw_phl_dbg_ctrl_comp(_ctrl, _comp_bit) 0
#define phl_ops_error_msg(ops_fun) do {} while (0)
#define hal_error_msg(ops_fun) do {} while (0)
#endif /* CONFIG_RTW_DEBUG */
#endif /* _PHL_DEBUG_H_ */
| 2301_81045437/rtl8852be | phl/phl_debug.h | C | agpl-3.0 | 5,371 |
/******************************************************************************
*
* Copyright(c) 2019 - 2021 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_DEF_H_
#define _PHL_DEF_H_
enum phl_packet_type {
PACKET_NORMAL,
PACKET_DHCP,
PACKET_ARP,
PACKET_EAPOL,
PACKET_EAPOL_START,
PACKET_MAX
};
/*HW_BAND0 - CMAC0 + PHY0 + S0*/
/*HW_BAND1 - CMAC1 + PHY1 + S1*/
/*wifi_role->hw_band*/
enum phl_band_idx {
HW_BAND_0,
HW_BAND_1,
HW_BAND_MAX
};
/*wifi_role->hw_port*/
enum phl_hw_port {
HW_PORT0,
HW_PORT1,
HW_PORT2,
HW_PORT3,
HW_PORT4,
HW_PORT_MAX,
};
#define RTW_MAX_TID_NUM 8
#define RTW_MAX_AC_QUEUE_NUM 4
enum phl_ac_queue {
PHL_BE_QUEUE_SEL = 0,
PHL_BK_QUEUE_SEL = 1,
PHL_VI_QUEUE_SEL = 2,
PHL_VO_QUEUE_SEL = 3,
PHL_AC_QUEUE_TOTAL
};
/**
* struct rtw_chan_def - channel definition
* @chan: the (control/primary) channel
* @center_ch: the center channel
* @bw: channel bandwidth
* @center_freq1: center frequency of first segment
* @center_freq2: center frequency of second segment
* (only with 80+80 MHz)
*/
struct rtw_chan_def {
enum band_type band; /* protocol -2.4G,5G,6G*/
enum channel_width bw;
enum chan_offset offset;
u8 chan; /*primary channel*/
u8 center_ch;
u16 hw_value;
u32 center_freq1;
u32 center_freq2;
};
struct chg_opch_param {
struct rtw_wifi_role_t *wrole;
struct rtw_chan_def new_chdef;
struct rtw_chan_def ori_chdef;
enum rtw_phl_status cmd_start_sts;
void (*chg_opch_done)(void *priv, u8 ridx, enum rtw_phl_status status);
};
/**
* struct rtw_chan_ctx - channel context
* @list:
* @chan_ctx_lock:
* @chan_def:
*/
struct rtw_chan_ctx {
_os_list list;
struct rtw_chan_def chan_def;
u8 role_map; /*used role_idx*/
bool dfs_enabled;
};
#ifdef CONFIG_PCI_HCI
struct rtw_pci_info {
u8 dummy;
};
#endif
#ifdef CONFIG_USB_HCI
struct rtw_usb_info {
enum rtw_usb_speed usb_speed; /* USB 1.1, 2.0 or 3.0 */
u16 usb_bulkout_size;
u8 outep_num;
u8 inep_num;
};
enum phl_usb_rx_agg_mode {
PHL_RX_AGG_DISABLE,
PHL_RX_AGG_DEFAULT,
PHL_RX_AGG_SMALL_PKT,
PHL_RX_AGG_USER_DEFINE,
};
/*
* refers to _usb.h
* #define SWITCHMODE 0x2
* #define FORCEUSB3MODE 0x1
* #define FORCEUSB2MODE 0x0
*/
enum rtw_usb_sw_ability {
RTW_USB2_ONLY = 0,
RTW_USB3_ONLY,
RTW_USB_SUPPORT_SWITCH,
RTW_USB_SUPPORT_MAX
};
#endif
#ifdef CONFIG_SDIO_HCI
struct rtw_sdio_info {
unsigned int clock;
unsigned int timing;
u8 sd3_bus_mode;
u16 block_sz;
u16 io_align_sz;
u16 tx_align_sz;
bool tx_512_by_byte_mode; /* Send 512 bytes by cmd53 byte or */
/* block mode. */
};
#endif
struct rtw_ic_info {
enum rtl_ic_id ic_id;
enum rtw_hci_type hci_type;
#ifdef CONFIG_SDIO_HCI
struct rtw_sdio_info sdio_info;
#endif
#ifdef CONFIG_USB_HCI
struct rtw_usb_info usb_info;
#endif
#ifdef CONFIG_PCI_HCI
struct rtw_pci_info pci_info;
#endif
};
enum rtw_proc_cmd_type {
RTW_PROC_CMD_UNKNOW,
RTW_PROC_CMD_BB, /* 1 */
RTW_PROC_CMD_RF, /* 2 */
RTW_PROC_CMD_MAC, /* 3 */
RTW_PROC_CMD_PHL, /* 4 */
RTW_PROC_CMD_CORE, /* 5 */
RTW_PROC_CMD_BTC, /* 6 */
RTW_PROC_CMD_EFUSE, /* 7 */
RTW_PROC_CMD_MAX
};
enum rtw_arg_type {
RTW_ARG_TYPE_UNKNOW,
RTW_ARG_TYPE_BUF, /* 1 */
RTW_ARG_TYPE_ARRAY, /* 2 */
RTW_ARG_TYPE_MAX
};
#define MAX_ARGC 20
#define MAX_ARGV 16
struct rtw_proc_cmd {
enum rtw_arg_type in_type;
u32 in_cnt_len;
union {
char *buf;
char vector[MAX_ARGC][MAX_ARGV];
}in;
};
enum rtw_para_src {
RTW_PARA_SRC_INTNAL, /* 0 */
RTW_PARA_SRC_EXTNAL, /* 1 */
RTW_PARA_SRC_CUSTOM, /* 2 */
RTW_PARA_SRC_MAX
};
struct rtw_para_info_t {
enum rtw_para_src para_src;
char para_path[256];
u32 para_data_len;
u32 *para_data;
};
#define regd_name_max_size 32
struct rtw_para_pwrlmt_info_t {
enum rtw_para_src para_src;
char para_path[256];
u32 para_data_len;
u32 *para_data;
char ext_regd_name[regd_name_max_size][10];
u16 ext_regd_arridx;
u16 ext_reg_map_num;
u8 *ext_reg_codemap;
};
#define RTW_PHL_HANDLER_STATUS_INITIALIZED BIT0
#define RTW_PHL_HANDLER_STATUS_SET BIT1
#define RTW_PHL_HANDLER_STATUS_RELEASED BIT2
#define RTW_PHL_HANDLER_PRIO_HIGH 0
#define RTW_PHL_HANDLER_PRIO_NORMAL 1
#define RTW_PHL_HANDLER_PRIO_LOW 2
enum rtw_phl_evt {
RTW_PHL_EVT_RX = BIT0,
RTW_PHL_EVT_TX_RECYCLE = BIT1,
RTW_PHL_EVT_MAX = BIT31
};
enum rtw_phl_config_int {
RTW_PHL_STOP_RX_INT,
RTW_PHL_RESUME_RX_INT,
RTW_PHL_SER_HANDSHAKE_MODE,
RTW_PHL_EN_HCI_INT,
RTW_PHL_DIS_HCI_INT,
RTW_PHL_CLR_HCI_INT,
RTW_PHL_CONFIG_INT_MAX
};
/**
* phl_handler - scheduled by core layer or phl itself,
* and its properties are assigned by the handler type
* @status: handler current status defined by RTW_PHL_HANDLER_STATUS_XXX
* @type: define different properties of handler - tasklet, thread, workitem
* @handle: store different type of handler structure
* @callback: handler callback function
* @context: context used in handler callback function
*/
struct rtw_phl_handler {
char status;
char type;
void *drv_priv;
struct _os_handler os_handler;
void (*callback)(void *context);
void *context;
};
struct rtw_xmit_req;
struct rtw_aoac_report;
struct rtw_phl_evt_ops {
enum rtw_phl_status (*rx_process)(void *drv_priv);
enum rtw_phl_status (*tx_recycle)(void *drv_priv, struct rtw_xmit_req *txreq);
enum rtw_phl_status (*tx_test_recycle)(void *phl, struct rtw_xmit_req *txreq);
bool (*set_rf_state)(void *drv_priv, enum rtw_rf_state state_to_set);
void (*wow_handle_sec_info_update)(void *drv_priv, struct rtw_aoac_report *aoac_info, u8 aoac_report_get_ok, u8 phase);
void (*indicate_wake_rsn)(void *drv_priv, u8 rsn);
#ifdef CONFIG_SYNC_INTERRUPT
void (*interrupt_restore)(void *drv_priv, u8 rx);
void (*set_interrupt_caps)(void *drv_priv, u8 en);
#endif /* CONFIG_SYNC_INTERRUPT */
void (*ap_ps_sta_ps_change)(void *drv_priv, u8 role_id, u8 *sta_mac,
int power_save);
u8 (*issue_null_data)(void *priv, u8 ridx, bool ps);
};
/*
* PHL CMD supports direct execution, no-wait (asynchronous) and wait (synchronous) modes
* PHL_CMD_DIRECTLY: call the PHL API, including I/O operations, directly
* PHL_CMD_NO_WAIT: send a phl cmd msg to the cmd dispatcher and do not wait for completion
* PHL_CMD_WAIT: send a phl cmd msg to the cmd dispatcher and wait for completion
*/
enum phl_cmd_type {
PHL_CMD_DIRECTLY,
PHL_CMD_NO_WAIT,
PHL_CMD_WAIT,
PHL_CMD_MAX,
};
enum role_type {
PHL_RTYPE_NONE,
PHL_RTYPE_STATION,
PHL_RTYPE_AP,
PHL_RTYPE_VAP,
PHL_RTYPE_ADHOC,
PHL_RTYPE_ADHOC_MASTER,
PHL_RTYPE_MESH,
PHL_RTYPE_MONITOR,
PHL_RTYPE_P2P_DEVICE,
PHL_RTYPE_P2P_GC,
PHL_RTYPE_P2P_GO,
PHL_RTYPE_TDLS,
PHL_RTYPE_NAN,
PHL_MLME_MAX
};
enum role_state {
PHL_ROLE_START, /* 0 - PHL*/
PHL_ROLE_STOP, /* 1 - PHL*/
PHL_ROLE_CHG_TYPE, /* 2 - PHL*/
PHL_ROLE_UPDATE_NOA, /* 3 - PHL*/
PHL_ROLE_MSTS_STA_CONN_START, /*CORE*/
PHL_ROLE_MSTS_STA_CONN_END,/*CORE*/
PHL_ROLE_MSTS_STA_DIS_CONN,/*CORE*/
PHL_ROLE_MSTS_AP_START,/*CORE*/
PHL_ROLE_MSTS_AP_STOP,/*CORE*/
PHL_ROLE_STATE_UNKNOWN,
};
enum mlme_state {
MLME_NO_LINK,
MLME_LINKING,
MLME_LINKED
};
enum wr_chg_id {
WR_CHG_TYPE,
WR_CHG_MADDR,
WR_CHG_AP_PARAM,
WR_CHG_EDCA_PARAM,
WR_CHG_MU_EDCA_PARAM,
WR_CHG_MU_EDCA_CFG,
WR_CHG_BSS_COLOR,
WR_CHG_RTS_TH,
WR_CHG_DFS_HE_TB_CFG,
WR_CHG_TRX_PATH,
WR_CHG_MAX,
};
enum wr_status{
WR_STATUS_PS_ANN = BIT0,
WR_STATUS_BCN_STOP = BIT1,
WR_STATUS_TSF_SYNC = BIT2,
WR_STATUS_MAX = BIT7
};
enum rtw_cfg_type { /* sync with pcfg_type */
CFG_TBTT_AGG,
CFG_TBTT_SHIFT,
CFG_HIQ_WIN,
CFG_HIQ_DTIM,
CFG_HIQ_MAX,
CFG_BCN_INTERVAL, /* Beacon Interval */
CFG_BSS_CLR
};
struct rtw_ap_param {
u32 cfg_id;
u32 value;
};
struct rtw_edca_param {
/* Access Category, 0:BE, 1:BK, 2:VI, 3:VO */
u8 ac;
/*
* EDCA parameter
* |31...16|15...12|11...8|7...0|
* | TXOP| CWMAX| CWMIN| AIFS|
*/
u32 param;
};
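/*
 * Illustrative sketch (not part of the original header): packing one EDCA
 * parameter set into the u32 layout documented above
 * (bits 31..16 TXOP, 15..12 CWMAX, 11..8 CWMIN, 7..0 AIFS). The function
 * name and the values used here are hypothetical.
 */
#if 0
static void rtw_edca_param_example(struct rtw_edca_param *p)
{
	p->ac = 0;			/* 0: BE */
	p->param = (0 << 16) |		/* TXOP */
		   (10 << 12) |		/* CWMAX */
		   (4 << 8) |		/* CWMIN */
		   3;			/* AIFS */
}
#endif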
struct rtw_mu_edca_param {
u8 ac;
u8 aifsn;
u8 cw;
u8 timer;
};
struct rtw_trx_path_param {
enum rf_path tx;
enum rf_path rx;
u8 tx_nss;
u8 rx_nss;
};
#define MAX_STORE_BCN_NUM 3
enum conf_lvl {
CONF_LVL_NONE = 0,
CONF_LVL_LOW,
CONF_LVL_MID,
CONF_LVL_HIGH
};
struct rtw_bcn_offset {
u16 offset; /*TU*/
enum conf_lvl conf_lvl; /*confidence level*/
u16 cr_tbtt_shift; /* CR current setting */
};
/*
* Store rx bcn tsf info
* @num: the number of stored entries in the "info" array
* @idx: the current store index of the "info" array
* @info: store array. info[0]: tsf, info[1]: tsf mod beacon interval (TU), info[2]: hw rx time
* @offset_i: Bcn offset info. Don't access this variable directly from the application;
	get the offset_i info from phl_get_sta_bcn_offset_info.
*/
struct rtw_rx_bcn_info {
u8 idx;
u8 num;
u64 info[3][MAX_STORE_BCN_NUM];
struct rtw_bcn_offset offset_i;
};
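/*
 * Illustrative sketch (not part of the original header): how a new beacon
 * sample could be stored in the ring above, where info[0]/info[1]/info[2]
 * hold the tsf, the tsf mod beacon interval (TU) and the hw rx time. The
 * helper below is hypothetical and only shows the idx/num bookkeeping.
 */
#if 0
static void rtw_rx_bcn_info_store_example(struct rtw_rx_bcn_info *bcn_i,
					  u64 tsf, u64 mod_tu, u64 hw_rx_time)
{
	bcn_i->info[0][bcn_i->idx] = tsf;
	bcn_i->info[1][bcn_i->idx] = mod_tu;
	bcn_i->info[2][bcn_i->idx] = hw_rx_time;
	bcn_i->idx = (bcn_i->idx + 1) % MAX_STORE_BCN_NUM;
	if (bcn_i->num < MAX_STORE_BCN_NUM)
		bcn_i->num++;
}
#endif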
struct rtw_bcn_pkt_info {
struct rtw_phl_stainfo_t *sta;
u64 tsf;
u64 hw_tsf;
};
struct rtw_rts_threshold {
u16 rts_time_th;
u16 rts_len_th;
};
enum phl_module_id{
/* 0 ~ 128 PHL background module starts from here*/
/* 1,2,3 cmd controller section */
PHL_BK_MDL_START = 0,
PHL_MDL_PHY_MGNT = 1,
PHL_MDL_TX = 2,
PHL_MDL_RX = 3,
/* above enum is fixed, add new module id from here*/
/* 10 ~ 40 protocol, wifi role section*/
PHL_BK_MDL_ROLE_START = 10,
PHL_MDL_MRC = 10, /* Multi-Role Controller instead of STA/P2P role /NAN/AP*/
PHL_MDL_SOUND = 11,
PHL_BK_MDL_ROLE_END = 40,
/* 41 ~ 70 mandatory background module section*/
PHL_BK_MDL_MDRY_START = 41,
PHL_MDL_POWER_MGNT = 41,
PHL_MDL_SER = 42,
PHL_BK_MDL_MDRY_END = 70,
/* 70 ~ 127 optional background module section*/
PHL_BK_MDL_OPT_START = 71,
PHL_MDL_BTC = 71,
/*PHL_MDL_RSVD = 72,*/
PHL_MDL_CUSTOM = 73,
PHL_MDL_WOW = 74,
PHL_MDL_PSTS = 75,
PHL_MDL_LED = 76,
PHL_MDL_GENERAL = 77,
PHL_MDL_REGU = 78,
PHL_BK_MDL_OPT_END = 127,
/* Fixed BK MDL Max Value*/
PHL_BK_MDL_END = 128,
/* 129 ~ 256 PHL foreground module starts from here*/
PHL_FG_MDL_START = 129,
PHL_FUNC_MDL_TEST_MODULE = 129,
PHL_FG_MDL_SCAN = 130,
PHL_FG_MDL_CONNECT = 131,
PHL_FG_MDL_DISCONNECT = 132,
PHL_FG_MDL_AP_START = 133,
PHL_FG_MDL_AP_STOP = 134,
PHL_FG_MDL_ECSA = 135,
PHL_FG_MDL_END = 254,
/* Fixed MDL Max Value*/
PHL_MDL_ID_MAX = 255
};
/* General phl event ids shall share this common enum definition;
 * if private events for a specific module are required,
 * please be sure to start its enum from PRIVATE_EVT_START (0x8000)
*/
enum phl_msg_evt_id {
MSG_EVT_NONE = 0,
MSG_EVT_PHY_ON = 1,
MSG_EVT_PHY_IDLE = 2,
MSG_EVT_SCAN_START = 3,
MSG_EVT_SCAN_END = 4,
MSG_EVT_CONNECT_START = 5,
MSG_EVT_CONNECT_LINKED = 6,
MSG_EVT_CONNECT_END = 7,
MSG_EVT_SER_L1 = 8,
MSG_EVT_SER_L2 = 9,
MSG_EVT_FWDL_OK = 10,
MSG_EVT_FWDL_FAIL = 11,
MSG_EVT_HAL_INIT_OK = 12,
MSG_EVT_HAL_INIT_FAIL = 13,
MSG_EVT_MP_CMD_DONE = 14,
/* wow */
MSG_EVT_WOW_ENTER = 15,
MSG_EVT_WOW_LEAVE = 16,
MSG_EVT_WOW_WAKE_RSN = 17,
MSG_EVT_BCN_RESEND = 18,
MSG_EVT_DUMP_PLE_BUFFER = 19,
MSG_EVT_MP_RX_PHYSTS = 20,
MSG_EVT_ROLE_NTFY = 21,
MSG_EVT_RX_PSTS = 22,
MSG_EVT_SWCH_START = 23,
MSG_EVT_SWCH_DONE = 24,
MSG_EVT_DISCONNECT_PREPARE = 25,
MSG_EVT_DISCONNECT = 26,
MSG_EVT_TSF_SYNC_DONE = 27,
MSG_EVT_TX_RESUME = 28,
MSG_EVT_AP_START_PREPARE =29,
MSG_EVT_AP_START = 30,
MSG_EVT_AP_START_END = 31,
MSG_EVT_AP_STOP_PREPARE = 32,
MSG_EVT_AP_STOP = 33,
MSG_EVT_PCIE_TRX_MIT = 34,
MSG_EVT_BTC_TMR = 35,
MSG_EVT_BTC_FWEVNT = 36,
MSG_EVT_BTC_REQ_BT_SLOT = 37,
MSG_EVT_BTC_PKT_EVT_NTFY = 38,
/* ser*/
MSG_EVT_SER_L0_RESET = 39, /* L0 notify only */
MSG_EVT_SER_M1_PAUSE_TRX = 40,
MSG_EVT_SER_IO_TIMER_EXPIRE = 41,
MSG_EVT_SER_FW_TIMER_EXPIRE = 42,
MSG_EVT_SER_M3_DO_RECOV = 43,
MSG_EVT_SER_M5_READY = 44,
MSG_EVT_SER_M9_L2_RESET = 45,
MSG_EVT_SER_EVENT_CHK = 46,
MSG_EVT_SER_POLLING_CHK = 47,
MSG_EVT_ECSA_START = 48,
MSG_EVT_ECSA_UPDATE_FIRST_BCN_DONE = 49,
MSG_EVT_ECSA_COUNT_DOWN = 50,
MSG_EVT_ECSA_SWITCH_START = 51,
MSG_EVT_ECSA_SWITCH_DONE = 52,
MSG_EVT_ECSA_CHECK_TX_RESUME = 53,
MSG_EVT_ECSA_DONE = 54,
MSG_EVT_LISTEN_STATE_EXPIRE = 55,
/* beamform */
MSG_EVT_SET_VHT_GID = 56,
MSG_EVT_WATCHDOG = 57,
MSG_EVT_DEV_CANNOT_IO = 58,
MSG_EVT_DEV_RESUME_IO = 59,
MSG_EVT_FORCE_USB_SW = 60,
MSG_EVT_GET_USB_SPEED = 61,
MSG_EVT_GET_USB_SW_ABILITY = 62,
MSG_EVT_CFG_AMPDU = 63,
MSG_EVT_DFS_PAUSE_TX = 64,
MSG_EVT_ROLE_RECOVER = 65,
MSG_EVT_ROLE_SUSPEND = 66,
MSG_EVT_HAL_SET_L2_LEAVE = 67,
MSG_EVT_NOTIFY_HAL = 68,
MSG_EVT_ISSUE_BCN = 69,
MSG_EVT_FREE_BCN = 70,
MSG_EVT_STOP_BCN = 71,
MSG_EVT_SEC_KEY = 72,
MSG_EVT_ROLE_START = 73,
MSG_EVT_ROLE_CHANGE = 74,
MSG_EVT_ROLE_STOP = 75,
MSG_EVT_STA_INFO_CTRL = 76,
MSG_EVT_STA_MEDIA_STATUS_UPT = 77,
MSG_EVT_CFG_CHINFO = 78,
MSG_EVT_STA_CHG_STAINFO = 79,
MSG_EVT_HW_TRX_RST_RESUME = 80,
MSG_EVT_HW_TRX_PAUSE = 81,
MSG_EVT_SW_TX_RESUME = 82,
MSG_EVT_SW_RX_RESUME = 83,
MSG_EVT_SW_TX_PAUSE = 84,
MSG_EVT_SW_RX_PAUSE = 85,
MSG_EVT_SW_TX_RESET = 86,
MSG_EVT_SW_RX_RESET = 87,
MSG_EVT_TRX_SW_PAUSE = 88,
MSG_EVT_TRX_SW_RESUME = 89,
MSG_EVT_TRX_PAUSE_W_RST = 90,
MSG_EVT_TRX_RESUME_W_RST = 91,
/* Regulation*/
MSG_EVT_REGU_SET_DOMAIN = 92,
MSG_EVT_RF_ON = 93,
MSG_EVT_RF_OFF = 94,
MSG_EVT_WPS_PRESSED = 95,
MSG_EVT_WPS_RELEASED = 96,
MSG_EVT_SURPRISE_REMOVE = 97,
MSG_EVT_DATA_PATH_START = 98,
MSG_EVT_DATA_PATH_STOP = 99,
MSG_EVT_TRX_PWR_REQ = 100,
/* tdls */
MSG_EVT_TDLS_SYNC = 101,
/* beamformee */
MSG_EVT_SET_BFEE_AID = 102,
/* ccx */
MSG_EVT_CCX_REPORT_TX_OK = 103,
MSG_EVT_CCX_REPORT_TX_FAIL = 104,
/* ps */
MSG_EVT_PS_CAP_CHG = 105,
MSG_EVT_PS_PERIOD_CHK = 106,
MSG_EVT_PS_DBG_LPS_ENTER = 107,
MSG_EVT_PS_DBG_LPS_LEAVE = 108,
MSG_EVT_PS_DBG_IPS_ENTER = 109,
MSG_EVT_PS_DBG_IPS_LEAVE = 110,
/* Change operating ch def(ch / bw) */
MSG_EVT_CHG_OP_CH_DEF_START = 111,
MSG_EVT_CHG_OP_CH_DEF_END = 112,
MSG_EVT_MDL_CHECK_STOP = 113,
/* dbg */
MSG_EVT_DBG_SIP_REG_DUMP = 200,
MSG_EVT_DBG_FULL_REG_DUMP = 201,
MSG_EVT_DBG_L2_DIAGNOSE = 202,
MSG_EVT_DBG_RX_DUMP = 203,
MSG_EVT_DBG_TX_DUMP = 204,
/* dbg end */
/* p2pps */
MSG_EVT_TSF32_TOG = 205,
/* p2pps end */
/*Add EVT-ID for linux core cmd temporarily*/
/* sub module IO */
MSG_EVT_NOTIFY_BB = 300,
MSG_EVT_NOTIFY_RF = 301,
MSG_EVT_NOTIFY_MAC = 302,
/* sub module IO end*/
MSG_EVT_LINUX_CMD_WRK = 888,
MSG_EVT_LINUX_CMD_WRK_TRI_PS = 889,
/* LED */
MSG_EVT_LED_TICK = 5000,
MSG_EVT_LED_EVT_START = 5001,
MSG_EVT_LED_EVT_END = 5050,
MSG_EVT_MAX = 0x7fff
};
enum phl_msg_recver_layer {
MSG_RECV_PHL = 0,
MSG_RECV_CORE = 1,
MSG_RECV_MAX
};
enum phl_msg_indicator {
MSG_INDC_PRE_PHASE = BIT0,
MSG_INDC_FAIL = BIT1,
MSG_INDC_CANCEL = BIT2,
MSG_INDC_CANNOT_IO = BIT3
};
enum phl_msg_opt {
MSG_OPT_SKIP_NOTIFY_OPT_MDL = BIT0,
MSG_OPT_BLIST_PRESENT = BIT1,
MSG_OPT_CLR_SNDR_MSG_IF_PENDING = BIT2,
MSG_OPT_SEND_IN_ABORT = BIT3,
MSG_OPT_PENDING_DURING_CANNOT_IO = BIT4,
};
/* all module share this common enum definition */
enum phy_bk_module_opcode {
BK_MODL_OP_NONE = 0,
BK_MODL_OP_CHK_NEW_MSG,
BK_MODL_OP_INPUT_CMD,
BK_MODL_OP_STATE,
BK_MODL_OP_CUS_SET_ROLE_CAP,
BK_MODL_OP_CUS_UPDATE_ROLE_CAP,
BK_MODL_OP_MAX
};
/* Foreground cmd token opcode */
enum phy_fg_cmd_req_opcode {
FG_REQ_OP_NONE = 0,
FG_REQ_OP_GET_ROLE,
FG_REQ_OP_GET_MDL_ID,
#ifdef RTW_WKARD_MRC_ISSUE_NULL_WITH_SCAN_OPS
FG_REQ_OP_GET_SCAN_PARAM,
FG_REQ_OP_GET_ISSUE_NULL_OPS,
#endif
#ifdef RTW_WKARD_CMD_SCAN_EXTEND_ACTIVE_SCAN
FG_REQ_OP_NOTIFY_BCN_RCV,
#endif
#ifdef RTW_WKARD_CMD_SCAN_EXTEND_ACTION_FRAME_TX
FG_REQ_OP_NOTIFY_ACTION_FRAME_TX,
#endif
FG_REQ_OP_MAX
};
/* priority of phl background
module which would be considered when dispatching phl msg*/
enum phl_bk_module_priority {
PHL_MDL_PRI_ROLE = 0,
PHL_MDL_PRI_OPTIONAL,
PHL_MDL_PRI_MANDATORY,
PHL_MDL_PRI_MAX
};
enum phl_data_ctl_cmd {
PHL_DATA_CTL_HW_TRX_RST_RESUME = 1,
PHL_DATA_CTL_HW_TRX_PAUSE = 2,
PHL_DATA_CTL_SW_TX_RESUME = 3,
PHL_DATA_CTL_SW_RX_RESUME = 4,
PHL_DATA_CTL_SW_TX_PAUSE = 5,
PHL_DATA_CTL_SW_RX_PAUSE = 6,
PHL_DATA_CTL_SW_TX_RESET = 7,
PHL_DATA_CTL_SW_RX_RESET = 8,
PHL_DATA_CTL_TRX_SW_PAUSE = 9,
PHL_DATA_CTL_TRX_SW_RESUME = 10,
PHL_DATA_CTL_TRX_PAUSE_W_RST = 11,
PHL_DATA_CTL_TRX_RESUME_W_RST = 12,
PHL_DATA_CTL_MAX = 0xFF
};
/**
* phl_msg - define a general msg format for PHL/CORE layer modules to handle
* one can easily extend additional mgnt info by encapsulating this struct in a wrapper,
* refer to
* struct phl_msg_ex in phl_msg_hub.c
* struct phl_dispr_msg_ex in phl_cmd_dispatcher.c
*
* @msg_id: indicates msg source & msg type
* BYTE 3: RSVD
* BYTE 2: PHL Module ID, refer to enum phl_module_id
* BYTE 0-1: event id, refer to enum phl_msg_evt_id
* @inbuf: input buffer that is sent along with the msg
* @inlen: input buffer length
* @outbuf: output buffer that is returned after all phl modules have received the msg
* @outlen: output buffer length
* @band_idx: index of the Band (PHY) associated with this msg
* @rsvd: feature reserved, passing an object pointer.
* For example,
* - cmd_scan : [0]: wifi_role.
* - CANNOT_IO error: [0]: mdl handle.
*/
struct phl_msg{
u32 msg_id;
enum phl_band_idx band_idx;
u8* inbuf;
u8* outbuf;
u32 inlen;
u32 outlen;
void *rsvd[4];
};
struct msg_notify_map {
u8* id_arr;
u8 len;
};
struct msg_dispatch_seq {
struct msg_notify_map map[PHL_MDL_PRI_MAX];
};
struct msg_self_def_seq {
struct msg_dispatch_seq pre_prot_phase;
struct msg_dispatch_seq post_prot_phase;
};
struct msg_completion_routine {
void* priv;
void (*completion)(void* priv, struct phl_msg* msg);
};
/**
* phl_msg_attribute: used in phl_disp_eng_send_msg
* @opt: refers to enum phl_msg_opt.
* @notify: input id array (refer to enum phl_module_id)
* for indicating additional dependency
* @completion: completion routine
*/
struct phl_msg_attribute {
u8 opt;
struct msg_notify_map notify;
struct msg_completion_routine completion;
#ifdef CONFIG_CMD_DISP_SUPPORT_CUSTOM_SEQ
void *dispr_attr;
#endif
};
/**
* phl_module_op_info - set by core layer or phl itself;
* op code processing is a synchronous process
* which is handled directly by the module handler
* @op_code: refer to enum phy_module_opcode
* @inbuf: input buffer that is sent along with the msg
* @inlen: input buffer length
* @outbuf: output buffer that is returned after all phy modules have received the msg
* @outlen: output buffer length
*/
struct phl_module_op_info{
u32 op_code;
u8* inbuf;
u8* outbuf;
u32 inlen;
u32 outlen;
};
/**
* phl_cmd_token_req - request format for applying for the token of a specific cmd
* dispatcher.
* A cmd token request is regarded as a foreground module and thus
* needs to contend for the cmd token.
* Normally, these requests are linked to a specific wifi role
* and acquire RF resources for a specific task.
*
* @module_id: starting from PHL_FG_MDL_START
* @priv: private context from the requestor
* @role: designated role info associated with the current request.
* -----------------------------------------
* regarding the "return code" for the following ops, refer to enum phl_mdl_ret_code
* -----------------------------------------
* @acquired: notify the requestor when the cmd token has been acquired for this cmd;
	cannot have any I/O operation.
* @abort: notify the requestor when the cmd has been canceled
	after calling rtw_phl_phy_cancel_token_req;
	cannot have any I/O operation.
* @msg_hdlr: notify the requestor about an incoming msg.
* @set_info: notify the requestor to handle a specific op code.
* @query_info: notify the requestor to handle a specific op code.
*/
struct phl_cmd_token_req{
u8 module_id;
void* priv;
void* role;
enum phl_mdl_ret_code (*acquired)(void* dispr, void* priv);
enum phl_mdl_ret_code (*abort)(void* dispr, void* priv);
enum phl_mdl_ret_code (*msg_hdlr)(void* dispr, void* priv,
struct phl_msg* msg);
enum phl_mdl_ret_code (*set_info)(void* dispr, void* priv,
struct phl_module_op_info* info);
enum phl_mdl_ret_code (*query_info)(void* dispr, void* priv,
struct phl_module_op_info* info);
};
/**
* phl_module_ops - standard interface for interacting with a cmd dispatcher.
* -----------------------------------------
* regarding the "return code" for the following ops, refer to enum phl_mdl_ret_code
* -----------------------------------------
* @init: notify module for initialization.
* @deinit: notify module for de-initialization.
* @start: notify module to start.
* @stop: notify module to stop.
* @msg_hdlr: notify module about incoming msg.
* @set_info: notify module to handle specific op code.
* @query_info: notify module to handle specific op code.
*/
struct phl_bk_module_ops {
enum phl_mdl_ret_code (*init)(void* phl_info, void* dispr, void** priv);
void (*deinit)(void* dispr, void* priv);
enum phl_mdl_ret_code (*start)(void* dispr, void* priv);
enum phl_mdl_ret_code (*stop)(void* dispr, void* priv);
enum phl_mdl_ret_code (*msg_hdlr)(void* dispr, void* priv,
struct phl_msg* msg);
enum phl_mdl_ret_code (*set_info)(void* dispr, void* priv,
struct phl_module_op_info* info);
enum phl_mdl_ret_code (*query_info)(void* dispr, void* priv,
struct phl_module_op_info* info);
};
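/*
 * Illustrative sketch (not part of the original header): a minimal background
 * module ops table wired to stub callbacks. MDL_RET_SUCCESS is assumed to be
 * a member of enum phl_mdl_ret_code (not shown in this excerpt); all module
 * and callback names below are hypothetical.
 */
#if 0
static enum phl_mdl_ret_code example_mdl_init(void *phl_info, void *dispr,
					      void **priv)
{
	*priv = NULL; /* allocate the module's private data here */
	return MDL_RET_SUCCESS;
}

static void example_mdl_deinit(void *dispr, void *priv)
{
}

static enum phl_mdl_ret_code example_mdl_start(void *dispr, void *priv)
{
	return MDL_RET_SUCCESS;
}

static enum phl_mdl_ret_code example_mdl_stop(void *dispr, void *priv)
{
	return MDL_RET_SUCCESS;
}

static enum phl_mdl_ret_code example_mdl_msg_hdlr(void *dispr, void *priv,
						  struct phl_msg *msg)
{
	/* react here to events such as MSG_EVT_WATCHDOG */
	return MDL_RET_SUCCESS;
}

static struct phl_bk_module_ops example_mdl_ops = {
	.init = example_mdl_init,
	.deinit = example_mdl_deinit,
	.start = example_mdl_start,
	.stop = example_mdl_stop,
	.msg_hdlr = example_mdl_msg_hdlr,
	.set_info = NULL,
	.query_info = NULL,
};
#endif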
/**
* phl_data_ctl_t - datapath control parameters for dispatcher controller
* @cmd: data path control command
* @id: module id which requests the data path control
*/
struct phl_data_ctl_t {
enum phl_data_ctl_cmd cmd;
enum phl_module_id id;
};
#define MSG_MDL_ID_FIELD(_msg_id) (((_msg_id) >> 16) & 0xFF)
#define MSG_EVT_ID_FIELD(_msg_id) ((_msg_id) & 0xFFFF)
#define MSG_INDC_FIELD(_msg_id) (((_msg_id) >> 24) & 0xFF)
#define IS_PRIVATE_MSG(_msg_id) ((_msg_id) & PRIVATE_EVT_START)
#define IS_MSG_FAIL(_msg_id) ((_msg_id) & ( MSG_INDC_FAIL << 24))
#define IS_MSG_IN_PRE_PHASE(_msg_id) ((_msg_id) & ( MSG_INDC_PRE_PHASE << 24))
#define IS_MSG_CANCEL(_msg_id) ((_msg_id) & ( MSG_INDC_CANCEL << 24))
#define IS_MSG_CANNOT_IO(_msg_id) ((_msg_id) & ( MSG_INDC_CANNOT_IO << 24))
#define SET_MSG_MDL_ID_FIELD(_msg_id, _id) \
((_msg_id) = (((_msg_id) & 0xFF00FFFF) | ((u32)(_id) << 16)))
#define SET_MSG_EVT_ID_FIELD(_msg_id, _id) \
((_msg_id) = (((_msg_id) & 0xFFFF0000) | (_id)))
#define SET_MSG_INDC_FIELD(_msg_id, _indc) \
((_msg_id) = (((_msg_id) & ~((u32)(_indc) << 24))|((u32)(_indc) << 24)))
#define CLEAR_MSG_INDC_FIELD(_msg_id, _indc) ((_msg_id) &= ~((_indc) << 24))
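/*
 * Illustrative sketch (not part of the original header): composing and
 * parsing a msg_id with the field macros above, following the layout
 * documented for struct phl_msg (byte 2 = module id, bytes 0-1 = event id,
 * byte 3 = indicator bits). Only the function name below is hypothetical.
 */
#if 0
static void phl_msg_id_example(void)
{
	struct phl_msg msg = {0};

	SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_RX);
	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_PHY_ON);

	if (MSG_MDL_ID_FIELD(msg.msg_id) == PHL_MDL_RX &&
	    !IS_MSG_FAIL(msg.msg_id)) {
		/* handle MSG_EVT_PHY_ON sent by the RX module */
	}
}
#endif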
#define RTW_MAX_FW_SIZE 0x400000
enum rtw_fw_src {
RTW_FW_SRC_INTNAL, /* 0 */
RTW_FW_SRC_EXTNAL, /* 1 */
RTW_FW_SRC_MAX
};
enum rtw_fw_rsn {
RTW_FW_RSN_INIT, /* 0 */
RTW_FW_RSN_SPIC, /* 1 */
RTW_FW_RSN_LPS, /* 2 */
RTW_FW_RSN_MCC, /* 3 */
RTW_FW_RSN_WOW, /* 4 */
RTW_FW_RSN_MAX
};
struct rtw_fw_cap_t {
enum rtw_fw_src fw_src;
u32 offload_cap;
u8 dlram_en;
u8 dlrom_en;
};
#define INVALID_WIFI_ROLE_IDX MAX_WIFI_ROLE_NUMBER
#define UNSPECIFIED_ROLE_ID 0xFF
#define MAX_SECCAM_NUM_PER_ENTRY 7
/* Role hw TX CAP*/
struct role_cap_t {
enum wlan_mode wmode;
enum channel_width bw;
u8 rty_lmt; /* retry limit for DATA frame, 0xFF: invalid */
u8 rty_lmt_rts; /* retry limit for RTS frame, 0xFF: invalid */
u8 tx_num_ampdu;
u8 tx_amsdu_in_ampdu; /*from SW & HW*/
u8 tx_ampdu_len_exp; /*from SW & HW*/
u8 tx_htc;
u8 tx_sgi;
u8 tx_ht_ldpc:1;
u8 tx_vht_ldpc:1;
u8 tx_he_ldpc:1;
u8 tx_ht_stbc:1;
u8 tx_vht_stbc:1;
u8 tx_he_stbc:1;
u8 supported_rates[12];
};
struct role_sw_cap_t {
u16 bf_cap; /* use define : HW_CAP_BFER_XX_XX */
u16 stbc_cap;/* use define: HW_CAP_STBC_XX */
};
/*
Protocol - RX CAP from 80211 PKT;
driver TX related functions need to
reference the __rx__ fields of rtw_phl_stainfo_t->asoc_cap
*/
struct protocol_cap_t {
/* MAC related */
u16 bcn_interval; /* beacon interval */
u8 num_ampdu;
u8 ampdu_density:3; /* rx ampdu cap */
u8 ampdu_len_exp; /* rx ampdu cap */
u8 amsdu_in_ampdu:1; /* rx ampdu cap */
u8 max_amsdu_len:2; /* 0: 4k, 1: 8k, 2: 11k */
u8 htc_rx:1;
u8 sm_ps:2;
u8 trig_padding:2;
u8 twt:6;
u8 all_ack:1;
u8 a_ctrl:4;
u8 ops:1;
u8 ht_vht_trig_rx:1;
u8 bsscolor;
u16 rts_th:10;
u8 short_slot:1; /* Short Slot Time */
u8 preamble:1; /* Preamble, 0: long, 1: short */
u8 sgi_20:1; /* HT Short GI for 20 MHz */
u8 sgi_40:1; /* HT Short GI for 40 MHz */
u8 sgi_80:1; /* VHT Short GI for 80 MHz */
u8 sgi_160:1; /* VHT Short GI for 160/80+80 MHz */
struct rtw_edca_param edca[4]; /* Access Category, 0:BE, 1:BK, 2:VI, 3:VO */
u8 mu_qos_info;
struct rtw_mu_edca_param mu_edca[4];
/* BB related */
u8 ht_ldpc:1;
u8 vht_ldpc:1;
u8 he_ldpc:1;
u8 he_su_bfmr:1;
u8 he_su_bfme:1;
u8 he_mu_bfmr:1;
u8 he_mu_bfme:1;
u8 bfme_sts:3;
u8 num_snd_dim:3;
u8 ht_su_bfmr:1;
u8 ht_su_bfme:1;
u8 vht_su_bfmr:1;
u8 vht_su_bfme:1;
u8 vht_mu_bfmr:1;
u8 vht_mu_bfme:1;
u8 ht_vht_ng:2;
u8 ht_vht_cb:2;
/*
* supported_rates: Supported data rates of CCK/OFDM.
* The rate definition follows the Wi-Fi spec, the unit is 500 kb/s,
* and the MSB (bit 7) represents a basic rate.
* e.g. CCK 2 Mbps as a non-basic rate is encoded as 0x04,
* and OFDM 6 Mbps as a basic rate is encoded as 0x8c.
* The rates are expected to come from the Supported Rates and Extended
* Supported Rates IEs.
* Value 0 marks the end of the array; no more valid data rates follow
* (see the sketch after this struct).
*/
u8 supported_rates[12];
u8 ht_rx_mcs[4];
u8 ht_tx_mcs[4];
u8 ht_basic_mcs[4]; /* Basic rate of HT */
u8 vht_rx_mcs[2];
u8 vht_tx_mcs[2];
u8 vht_basic_mcs[2]; /* Basic rate of VHT */
u8 he_rx_mcs[6];/*80,160,80+80*/
u8 he_tx_mcs[6];/*80,160,80+80*/
u8 he_basic_mcs[2]; /* Basic rate of HE */
u8 stbc_ht_rx:2;
u8 stbc_vht_rx:3;
u8 stbc_he_rx:1;
u8 stbc_tx:1;
u8 stbc_ht_tx:1;
u8 stbc_vht_tx:1;
u8 stbc_he_tx:1;
u8 ltf_gi;
u8 doppler_tx:1;
u8 doppler_rx:1;
u8 dcm_max_const_tx:2;
u8 dcm_max_nss_tx:1;
u8 dcm_max_const_rx:2;
u8 dcm_max_nss_rx:1;
u8 partial_bw_su_in_mu:1;
u8 bfme_sts_greater_80mhz:3;
u8 num_snd_dim_greater_80mhz:3;
u8 stbc_tx_greater_80mhz:1;
u8 stbc_rx_greater_80mhz:1;
u8 ng_16_su_fb:1;
u8 ng_16_mu_fb:1;
u8 cb_sz_su_fb:1;
u8 cb_sz_mu_fb:1;
u8 trig_su_bfm_fb:1;
u8 trig_mu_bfm_fb:1;
u8 trig_cqi_fb:1;
u8 partial_bw_su_er:1;
u8 pkt_padding:2;
u8 ppe_thr[8][4];
u8 pwr_bst_factor:1;
u8 max_nc:3;
u8 dcm_max_ru:2;
u8 long_sigb_symbol:1;
u8 non_trig_cqi_fb:1;
u8 tx_1024q_ru:1;
u8 rx_1024q_ru:1;
u8 fbw_su_using_mu_cmprs_sigb:1;
u8 fbw_su_using_mu_non_cmprs_sigb:1;
u8 er_su:1;
u8 tb_pe:3;
u16 txop_du_rts_th;
u8 he_rx_ndp_4x32:1;
/* RF related */
u8 nss_tx:3;
u8 nss_rx:3;
u8 num_ampdu_bk;
};
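/*
 * Illustrative sketch (not part of the original header): encoding
 * supported_rates as described in the comment inside the struct above -
 * the unit is 500 kb/s and bit 7 marks a basic rate, so CCK 1/2 Mbps basic
 * rates are 0x82/0x84, OFDM 6 Mbps basic is 0x8c, OFDM 12/24 Mbps non-basic
 * are 0x18/0x30, and a trailing 0 terminates the array. The rate set chosen
 * here is hypothetical.
 */
#if 0
static const u8 example_supported_rates[12] = {
	0x82,	/* CCK 1M, basic */
	0x84,	/* CCK 2M, basic */
	0x8c,	/* OFDM 6M, basic */
	0x18,	/* OFDM 12M */
	0x30,	/* OFDM 24M */
	0,	/* end of array */
};
#endif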
#define LOAD_MAC_REG_FILE BIT0
#define LOAD_BB_PHY_REG_FILE BIT1
#define LOAD_BB_PHY_REG_MP_FILE BIT2
#define LOAD_RF_RADIO_FILE BIT3
#define LOAD_RF_TXPWR_BY_RATE BIT4
#define LOAD_RF_TXPWR_TRACK_FILE BIT5
#define LOAD_RF_TXPWR_LMT_FILE BIT6
#define LOAD_RF_TXPWR_LMT_RU_FILE BIT7
#define LOAD_BB_PHY_REG_GAIN_FILE BIT8
#define PHL_UNDEFINED_SW_CAP 0xFF
struct rtw_pcie_ltr_lat_ctrl {
enum rtw_pcie_bus_func_cap_t ctrl;
u32 val;
};
enum rtw_pcie_ltr_state {
RTW_PCIE_LTR_SW_ACT = 1,
RTW_PCIE_LTR_SW_IDLE = 2
};
struct bus_sw_cap_t {
#ifdef CONFIG_PCI_HCI
enum rtw_pcie_bus_func_cap_t l0s_ctrl;
enum rtw_pcie_bus_func_cap_t l1_ctrl;
enum rtw_pcie_bus_func_cap_t l1ss_ctrl;
enum rtw_pcie_bus_func_cap_t wake_ctrl;
enum rtw_pcie_bus_func_cap_t crq_ctrl;
u32 txbd_num;
u32 rxbd_num;
u32 rpbd_num;
u32 rxbuf_num;
u32 rpbuf_num;
u8 clkdly_ctrl;
u8 l0sdly_ctrl;
u8 l1dly_ctrl;
struct rtw_pcie_ltr_lat_ctrl ltr_act;
struct rtw_pcie_ltr_lat_ctrl ltr_idle;
u8 ltr_init_state;
u16 ltr_sw_ctrl_thre; /* [15:8] tx [7:0] rx */
u8 ltr_sw_ctrl;
u8 ltr_hw_ctrl;
u32 ltr_last_trigger_time;
u32 ltr_sw_act_tri_cnt;
u32 ltr_sw_idle_tri_cnt;
u8 ltr_cur_state;
#elif defined (CONFIG_USB_HCI)
u32 tx_buf_size;
u32 tx_buf_num;
u32 tx_mgnt_buf_size;
u32 tx_mgnt_buf_num;
u32 tx_h2c_buf_num;
u32 rx_buf_size;
u32 rx_buf_num;
u32 in_token_num;
#elif defined(CONFIG_SDIO_HCI)
u32 tx_buf_size;
u32 tx_buf_num;
u32 tx_mgnt_buf_size;
u32 tx_mgnt_buf_num;
u32 rx_buf_size;
u32 rx_buf_num;
#else
u8 temp_for_struct_empty; /* for undefined interface */
#endif
};
struct bus_cap_t {
#ifdef CONFIG_PCI_HCI
enum rtw_pcie_bus_func_cap_t l0s_ctrl;
enum rtw_pcie_bus_func_cap_t l1_ctrl;
enum rtw_pcie_bus_func_cap_t l1ss_ctrl;
enum rtw_pcie_bus_func_cap_t wake_ctrl;
enum rtw_pcie_bus_func_cap_t crq_ctrl;
u32 txbd_num;
u32 rxbd_num;
u32 rpbd_num;
u32 rxbuf_num;
u32 rpbuf_num;
u8 clkdly_ctrl;
u8 l0sdly_ctrl;
u8 l1dly_ctrl;
struct rtw_pcie_ltr_lat_ctrl ltr_act;
struct rtw_pcie_ltr_lat_ctrl ltr_idle;
u8 ltr_init_state;
u8 ltr_sw_ctrl;
u8 ltr_hw_ctrl;
#elif defined (CONFIG_USB_HCI)
u32 tx_buf_size;
u32 tx_buf_num;
u32 tx_mgnt_buf_size;
u32 tx_mgnt_buf_num;
u32 tx_h2c_buf_num;
u32 rx_buf_size;
u32 rx_buf_num;
u32 in_token_num;
#elif defined(CONFIG_SDIO_HCI)
u32 tx_buf_size;
u32 tx_buf_num;
u32 tx_mgnt_buf_size;
u32 tx_mgnt_buf_num;
u32 rx_buf_size;
u32 rx_buf_num;
#else
u8 temp_for_struct_empty; /* for undefined interface */
#endif
};
#ifdef CONFIG_PHL_TWT
#define DELETE_ALL 0xFF
#define IGNORE_CFG_ID 0xFF
#define IGNORE_MACID 0xFF
enum rtw_phl_twt_sup_cap {
RTW_PHL_TWT_REQ_SUP = BIT(0), /* REQUESTER */
RTW_PHL_TWT_RSP_SUP = BIT(1)/* RESPONDER */
};
enum rtw_phl_nego_type {
RTW_PHL_INDIV_TWT = 0, /*individual TWT*/
RTW_PHL_WAKE_TBTT_INR = 1, /*wake TBTT and wake interval*/
RTW_PHL_BCAST_TWT = 2, /*Broadcast TWT*/
RTW_PHL_MANAGE_BCAST_TWT = 3 /*Manage memberships in broadcast TWT schedules*/
};
enum rtw_phl_wake_dur_unit{ /*wake duration unit*/
RTW_PHL_WAKE_256US = 0,
RTW_PHL_WAKE_1TU = 1
};
enum rtw_phl_setup_cmd{
RTW_PHL_REQUEST_TWT = 0,
RTW_PHL_SUGGEST_TWT = 1,
RTW_PHL_DEMAND_TWT = 2,
RTW_PHL_TWT_GROUPING = 3,
RTW_PHL_ACCEPT_TWT = 4,
RTW_PHL_ALTERNATE_TWT = 5,
RTW_PHL_DICTATE_TWT = 6,
RTW_PHL_REJECT_TWT = 7
};
enum rtw_phl_flow_type{
RTW_PHL_ANNOUNCED_TWT = 0,
RTW_PHL_UNANNOUNCED_TWT = 1
};
enum rtw_phl_twt_sta_action {
TWT_STA_NONE = 0,
TWT_STA_ADD_MACID = 1,
TWT_STA_DEL_MACID = 2,
TWT_STA_TETMINATW_SP = 3,
TWT_STA_SUSPEND_TWT = 4,
TWT_STA_RESUME_TWT = 5
};
enum rtw_phl_twt_cfg_action {
TWT_CFG_ADD = 0,
TWT_CFG_DELETE = 1,
TWT_CFG_MODIFY = 2
};
struct rtw_phl_twt_flow_type01 {
u8 twt_flow_id;
u8 teardown_all;
};
struct rtw_phl_twt_flow_type2 {
u8 reserved;
};
struct rtw_phl_twt_flow_type3 {
u8 bcast_twt_id;
u8 teardown_all;
};
struct rtw_phl_twt_flow_field{
enum rtw_phl_nego_type nego_type;
union {
struct rtw_phl_twt_flow_type01 twt_flow01;
struct rtw_phl_twt_flow_type2 twt_flow2;
struct rtw_phl_twt_flow_type3 twt_flow3;
} info;
};
/*phl_twt_setup_info Start*/
/*Broadcast TWT Parameter Set field*/
struct rtw_phl_bcast_twt_para_set{
u8 reserved; /*todo*/
};
/*Individual TWT Parameter Set field*/
struct rtw_phl_twt_group_asgmt{
u8 reserved; /*todo*/
};
struct rtw_phl_req_type_indiv{
enum rtw_phl_setup_cmd twt_setup_cmd; /*twt setup command*/
enum rtw_phl_flow_type flow_type;
u8 twt_request;
u8 trigger;
u8 implicit;
u8 twt_flow_id;
u8 twt_wake_int_exp;/*twt wake interval exponent*/
u8 twt_protection;
};
struct rtw_phl_indiv_twt_para_set{
struct rtw_phl_req_type_indiv req_type;
struct rtw_phl_twt_group_asgmt twt_group_asgmt; /* twt group assignment*/
u32 target_wake_t_h; /* if it contains twt_group_asgmt then it doesn't contain target_wake_time */
u32 target_wake_t_l;
u16 twt_wake_int_mantissa; /*twt wake interval mantissa*/
u8 nom_min_twt_wake_dur; /*nominal minimum twt wake duration*/
u8 twt_channel;
};
struct rtw_phl_twt_control{
enum rtw_phl_nego_type nego_type; /*negotiation type*/
enum rtw_phl_wake_dur_unit wake_dur_unit; /*wake duration unit*/
u8 ndp_paging_indic; /*ndp paging indicator*/
u8 responder_pm_mode;
u8 twt_info_frame_disable; /*twt information frame disable*/
};
struct rtw_phl_twt_element{
/* element info*/
/*control filed*/
struct rtw_phl_twt_control twt_ctrl;
/*twt para info*/
union {
struct rtw_phl_indiv_twt_para_set i_twt_para_set;
struct rtw_phl_bcast_twt_para_set b_twt_para_set;
} info;
};
struct rtw_phl_twt_setup_info{
struct rtw_phl_twt_element twt_element;
//struct rtw_phl_stainfo_t *phl_sta; //sta entry
u8 dialog_token;
};
/*phl_twt_setup_info End*/
/*phl_twt_info Start*/
struct rtw_twt_sta_info{
_os_list list;
struct rtw_phl_stainfo_t *phl_sta; /*sta entry*/
u8 id; /*twt_flow_identifier or broadcast_twt_id*/
};
struct rtw_phl_twt_info{
enum rtw_phl_wake_dur_unit wake_dur_unit;
enum rtw_phl_nego_type nego_type;
enum rtw_phl_flow_type flow_type;
u8 twt_id; /*config id*/
u8 bcast_twt_id; /*ignore in individual TWT*/
u8 twt_action;
u8 responder_pm_mode;
u8 trigger;
u8 implicit_lastbcast; /*implicit or lastbroadcast*/
u8 twt_protection;
u8 twt_wake_int_exp;
u8 nom_min_twt_wake_dur;
u16 twt_wake_int_mantissa;
u32 target_wake_time_h;
u32 target_wake_time_l;
};
#endif /* CONFIG_PHL_TWT */
enum rtw_lps_listen_bcn_mode {
RTW_LPS_RLBM_MIN = 0,
RTW_LPS_RLBM_MAX = 1,
RTW_LPS_RLBM_USERDEFINE = 2,
RTW_LPS_LISTEN_BCN_MAX,
};
enum rtw_lps_smart_ps_mode {
RTW_LPS_LEGACY_PWR1 = 0,
RTW_LPS_TRX_PWR0 = 1,
RTW_LPS_SMART_PS_MAX,
};
struct rtw_wow_cap_t {
u8 magic_sup;
u8 pattern_sup;
u8 ping_pattern_wake_sup;
u8 arp_ofld_sup;
u8 ns_oflod_sup;
u8 gtk_ofld_sup;
};
/**
* enum phl_ps_leave_fail_act decides the action when leaving ps fails
* BIT 0 : reject all subsequent power request
* BIT 1 : trigger L2 reset
*/
enum phl_ps_leave_fail_act {
PS_LEAVE_FAIL_ACT_REJ_PWR = BIT0,
PS_LEAVE_FAIL_ACT_L2 = BIT1
};
#define PS_LEAVE_FAIL_ACT_NONE 0
enum phl_ps_operation_mode {
PS_OP_MODE_DISABLED = 0,
PS_OP_MODE_FORCE_ENABLED = 1,
PS_OP_MODE_AUTO = 2
};
enum phl_ps_pwr_lvl {
PS_PWR_LVL_PWROFF = 0, /* hal deinit */
PS_PWR_LVL_PWR_GATED = 1, /* FW control*/
PS_PWR_LVL_CLK_GATED = 2, /* FW control*/
PS_PWR_LVL_RF_OFF = 3, /* FW control*/
PS_PWR_LVL_PWRON = 4, /* hal init */
PS_PWR_LVL_MAX
};
/**
* enum phl_ps_rt_rson records the reason to stop power saving
* BIT 0 : by debug flow setting
* BIT 1 : by core initialization setting
* BIT 2 : by battery change
*/
enum phl_ps_rt_rson {
PS_RT_DEBUG = BIT0,
PS_RT_CORE_INIT = BIT1,
PS_RT_BATTERY_CHG = BIT2,
};
#define PS_RT_RSON_NONE 0
#define PS_CAP_PWRON BIT0
#define PS_CAP_RF_OFF BIT1
#define PS_CAP_CLK_GATED BIT2
#define PS_CAP_PWR_GATED BIT3
#define PS_CAP_PWR_OFF BIT4
/**
* ips_en/lps_en
* refer to enum phl_ps_operation_mode
* 0: disable -> disable all ps mechanism
* 1: force enable -> ignore all other condition, force enter ps
* 2: auto -> will be affected by runtime capability set by core
*
* ips_cap/ips_wow_cap/lps_cap/lps_wow_cap are bit defined
* corresponding bit is set if specific power level is supported
* BIT0: Power on
* BIT1: Rf off
* BIT2: Clock gating
* BIT3: Power gating
* BIT4: Power off
*/
struct rtw_ps_cap_t {
/* rf state */
enum rtw_rf_state init_rf_state;
u8 init_rt_stop_rson;
u8 leave_fail_act; /* action when leave ps fail */
/* ips */
u8 ips_en;
u8 ips_cap;
u8 ips_wow_en;
u8 ips_wow_cap;
/* lps */
u8 lps_en;
u8 lps_cap;
u8 lps_awake_interval;
enum rtw_lps_listen_bcn_mode lps_listen_bcn_mode;
enum rtw_lps_smart_ps_mode lps_smart_ps_mode;
u8 lps_rssi_enter_threshold;
u8 lps_rssi_leave_threshold;
u8 lps_rssi_diff_threshold;
bool lps_pause_tx;
/* wow lps */
u8 lps_wow_en;
u8 lps_wow_cap;
u8 lps_wow_awake_interval;
enum rtw_lps_listen_bcn_mode lps_wow_listen_bcn_mode;
enum rtw_lps_smart_ps_mode lps_wow_smart_ps_mode;
};
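/*
 * Illustrative sketch (not part of the original header): filling the power
 * save operation modes and capability bitmaps according to the comment
 * above. The function name and the chosen values are hypothetical.
 */
#if 0
static void rtw_ps_cap_example(struct rtw_ps_cap_t *cap)
{
	cap->ips_en = PS_OP_MODE_AUTO;
	cap->ips_cap = PS_CAP_PWRON | PS_CAP_RF_OFF;
	cap->lps_en = PS_OP_MODE_AUTO;
	cap->lps_cap = PS_CAP_PWRON | PS_CAP_RF_OFF | PS_CAP_CLK_GATED;
	cap->lps_listen_bcn_mode = RTW_LPS_RLBM_MAX;
	cap->lps_smart_ps_mode = RTW_LPS_TRX_PWR0;
}
#endif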
struct rtw_edcca_cap_t {
u8 edcca_adap_th_2g;
u8 edcca_adap_th_5g;
u8 edcca_carrier_sense_th;
};
struct phy_sw_cap_t {
struct rtw_para_info_t mac_reg_info;
struct rtw_para_info_t bb_phy_reg_info;
struct rtw_para_info_t bb_phy_reg_mp_info;
struct rtw_para_info_t bb_phy_reg_gain_info;
struct rtw_para_info_t rf_radio_a_info;
struct rtw_para_info_t rf_radio_b_info;
struct rtw_para_info_t rf_txpwr_byrate_info;
struct rtw_para_info_t rf_txpwrtrack_info;
struct rtw_para_pwrlmt_info_t rf_txpwrlmt_info;
struct rtw_para_pwrlmt_info_t rf_txpwrlmt_ru_info;
u8 proto_sup;
u8 band_sup;
u8 bw_sup;
u8 txss;
u8 rxss;
u16 hw_rts_time_th;
u16 hw_rts_len_th;
bool bfreed_para;
};
/* final capability of phy */
struct phy_cap_t {
u8 proto_sup;
u8 band_sup;
u8 bw_sup;
u8 txss;
u8 rxss;
u16 hw_rts_time_th;
u16 hw_rts_len_th;
};
/* final capability of device */
struct dev_cap_t {
u64 hw_sup_flags;/*hw's feature support flags*/
#ifdef RTW_WKARD_LAMODE
bool la_mode;
#endif
u8 pkg_type;
u8 rfe_type;
u8 bypass_rfe_chk;
u8 xcap;
struct rtw_fw_cap_t fw_cap;
#ifdef CONFIG_MCC_SUPPORT
bool mcc_sup;
#endif
#ifdef CONFIG_DBCC_SUPPORT
bool dbcc_sup;
#endif
#ifdef CONFIG_PHL_TWT
u8 twt_sup;
#endif /* CONFIG_PHL_TWT */
struct rtw_wow_cap_t wow_cap;
struct rtw_ps_cap_t ps_cap;
u8 hw_hdr_conv;
u8 domain;
u8 btc_mode;
u8 ap_ps; /* support for AP mode PS in PHL */
u8 pwrbyrate_off;
u8 pwrlmt_type;
u8 rf_board_opt;
u8 sta_ulru; /* support UL OFDMA for STA mode (reply to trigger frame) */
#ifdef RTW_WKARD_BB_DISABLE_STA_2G40M_ULOFDMA
u8 sta_ulru_2g40mhz; /* when "sta_ulru" is enabled, support UL OFDMA on 2.4G 40MHz ? */
#endif
u8 tx_mu_ru;
struct rtw_edcca_cap_t edcca_cap;
#ifdef CONFIG_LOAD_PHY_PARA_FROM_FILE
bool bfree_para_info; /* keep load file para info buf,default 0*/
#endif
u8 hw_stype_cap;
u8 wl_func_cap;
u8 rpq_agg_num; /* 0: no adjust, use mac default size: 121 */
};
#ifdef RTW_PHL_BCN //phl def
#define BCN_ID_MAX (0xFF)
#define MAX_BCN_SIZE 1000
enum bcn_offload_flags{
BCN_HW_SEQ = 0,
BCN_HW_TIM,
BCN_HW_MAX = 32,
};
struct rtw_bcn_info_cmn {
u8 role_idx;
u8 bcn_id;
u8 bcn_added;
u8 bssid[6];
u32 bcn_interval;
u8 bcn_buf[MAX_BCN_SIZE];
u32 bcn_length;
u32 bcn_rate;
u32 bcn_dtim;
u32 ie_offset_tim;
u32 bcn_offload;
};
struct rtw_bcn_info_hw {
u8 band;
u8 port;
u8 mbssid;
u8 mac_id;
};
struct rtw_bcn_entry {
_os_list list;
struct rtw_bcn_info_cmn *bcn_cmn; //filled by core
struct rtw_bcn_info_hw bcn_hw; //filled by phl //TODO: mapping for 8852, 8834, ...
};
#endif
struct rtw_phl_com_t;
struct phl_msg_receiver {
void* priv;
void (*incoming_evt_notify)(void* priv, struct phl_msg *msg);
};
#ifdef CONFIG_PHL_P2PPS
#define MAX_NOA_DESC 5
#define NOAID_NONE 0xFF
enum p2pps_trig_tag {
P2PPS_TRIG_GO = 0,
P2PPS_TRIG_GC = 1,
P2PPS_TRIG_GC_255 = 2,
P2PPS_TRIG_MCC = 3,
P2PPS_TRIG_2G_SCC_1AP_1STA_BT = 4,
P2PPS_TRIG_MAX = MAX_NOA_DESC
};
struct rtw_phl_noa_desc {
u8 enable; /*false=disable, true=enable*/
struct rtw_wifi_role_t *w_role;
enum p2pps_trig_tag tag;
u32 start_t_h;
u32 start_t_l;
u32 interval;
u32 duration;
u8 count;
u8 noa_id; /*filled by phl noa module*/
};
struct rtw_phl_opps_desc {
u16 ctw;
u8 all_slep;
};
struct rtw_phl_tsf32_tog_rpt{
u8 band;
u8 port;
u8 valid;
u16 early;
u16 status;
u32 tsf_l;
u32 tsf_h;
};
struct rtw_phl_p2pps_ops {
void *priv; /* ops private, define by core layer*/
void (*tsf32_tog_update_noa)(void *priv, struct rtw_wifi_role_t *w_role,
struct rtw_phl_tsf32_tog_rpt *rpt);
void (*tsf32_tog_update_single_noa)(void *priv,
struct rtw_wifi_role_t *w_role,
struct rtw_phl_noa_desc *desc);
};
#endif
struct rtw_wifi_role_t {
struct rtw_phl_com_t *phl_com;/*point to phl_com*/
#ifdef RTW_WKARD_ROLE_TYPE
enum role_type real_type;
#endif /* RTW_WKARD_ROLE_TYPE */
enum role_type type;/*will mapping to net type*/
enum role_type target_type;
#ifdef RTW_WKARD_PHL_NTFY_MEDIA_STS
bool is_gc;
#endif
enum mlme_state mstate;
bool active;
enum wr_status status;
u8 id;/* record role_idx in phl_com */
u8 hw_wmm; /*HW EDCA - wmm0 or wmm1*/
#ifdef RTW_WKARD_HW_WMM_ALLOCATE
_os_atomic hw_wmm0_ref_cnt;
#endif
u8 mac_addr[MAC_ALEN];
u8 hw_band; /*MAC Band0 or Band1*/
u8 hw_port; /*MAC HW Port*/
/*
* final protocol capability of role from intersection of
* sw role cap, sw protocol cap and hw protocol cap
*/
struct protocol_cap_t proto_role_cap;
/*
* final capability of role from intersection of
* sw role cap, final phy cap and final dev cap
*/
struct role_cap_t cap;
/*#ifdef CONFIG_AP*/
#ifdef RTW_PHL_BCN
struct rtw_bcn_info_cmn bcn_cmn; //todo: ieee mbssid case & multi-bcn (in one iface) case
u8 hw_mbssid;
#endif
u8 dtim_period;
u8 mbid_num;
u32 hiq_win;
/*#endif CONFIG_AP*/
struct rtw_chan_def chandef;
struct rtw_chan_ctx *chanctx;/*point to chanctx*/
struct phl_queue assoc_sta_queue;
#ifdef CONFIG_PHL_TWT
struct rtw_phl_twt_setup_info twt_setup_info;
#endif /* CONFIG_PHL_TWT */
#ifdef CONFIG_PHL_P2PPS
struct rtw_phl_noa_desc noa_desc[MAX_NOA_DESC];
#endif
void *core_data; /* Track back to counter part in core layer */
#ifdef RTW_WKARD_BFEE_SET_AID
u16 last_set_aid;
#endif
};
#define TXTP_CALC_DIFF_MS 1000
#define RXTP_CALC_DIFF_MS 1000
#define TX_ULTRA_LOW_TP_THRES_KBPS 100
#define RX_ULTRA_LOW_TP_THRES_KBPS 100
#define TX_LOW_TP_THRES_MBPS 2
#define RX_LOW_TP_THRES_MBPS 2
#define TX_MID_TP_THRES_MBPS 10
#define RX_MID_TP_THRES_MBPS 10
#define TX_HIGH_TP_THRES_MBPS 50
#define RX_HIGH_TP_THRES_MBPS 50
enum rtw_tfc_lvl {
RTW_TFC_IDLE = 0,
RTW_TFC_ULTRA_LOW = 1,
RTW_TFC_LOW = 2,
RTW_TFC_MID = 3,
RTW_TFC_HIGH = 4,
RTW_TFC_LVL_MAX = 0xFF
};
enum rtw_tfc_sts {
TRAFFIC_CHANGED = BIT0,
TRAFFIC_INCREASE = BIT1,
TRAFFIC_DECREASE = BIT2,
TRAFFIC_STS_MAX = BIT7
};
struct rtw_traffic_t {
enum rtw_tfc_lvl lvl;
enum rtw_tfc_sts sts;
};
struct rtw_stats_tp {
u64 last_calc_bits;
u32 last_calc_time_ms;
};
/*statistic*/
struct rtw_stats {
u64 tx_byte_uni;/*unicast tx byte*/
u64 rx_byte_uni;/*unicast rx byte*/
u64 tx_byte_total;
u64 rx_byte_total;
u32 tx_tp_kbits;
u32 rx_tp_kbits;
u16 tx_moving_average_tp; /* tx average MBps*/
u16 rx_moving_average_tp; /* rx average MBps*/
u32 last_tx_time_ms;
u32 last_rx_time_ms;
u32 txreq_num;
u32 rx_rate;
u32 rx_rate_nmr[RTW_DATA_RATE_HE_NSS4_MCS11 +1];
u64 ser_event[8]; /* RTW_PHL_SER_MAX */
struct rtw_stats_tp txtp;
struct rtw_stats_tp rxtp;
struct rtw_traffic_t tx_traffic;
struct rtw_traffic_t rx_traffic;
u32 rx_tf_cnt; /* rx trigger frame number (accumulated, only reset in disconnect) */
};
enum sta_chg_id {
STA_CHG_BW,
STA_CHG_NSS,
STA_CHG_RAMASK,
STA_CHG_SEC_MODE,
STA_CHG_MBSSID,
STA_CHG_RA_GILTF,
STA_CHG_MAX
};
enum phl_upd_mode {
PHL_UPD_ROLE_CREATE,
PHL_UPD_ROLE_REMOVE,
PHL_UPD_ROLE_TYPE_CHANGE,
PHL_UPD_ROLE_INFO_CHANGE,
PHL_UPD_STA_INFO_CHANGE,
PHL_UPD_STA_CON_DISCONN,
PHL_UPD_ROLE_MAX
};
#ifdef CONFIG_PHL_TXSC
#define PHL_TXSC_ENTRY_NUM 8
#define MAX_WD_SIZE 128
struct phl_txsc_entry {
bool txsc_wd_cached;
u8 txsc_wd_cache[MAX_WD_SIZE];
u8 txsc_wd_len;
u32 txsc_cache_hit;
};
#endif
struct rtw_hal_stainfo_t;
struct rtw_phl_stainfo_t {
_os_list list;
struct rtw_wifi_role_t *wrole;
bool active;
u16 aid;
u16 macid;
u8 mac_addr[MAC_ALEN];
struct rtw_chan_def chandef;
struct rtw_stats stats;
enum wlan_mode wmode;
/*mlme protocol or MAC related CAP*/
u8 bcn_hit_cond;
u8 hit_rule;
u8 tf_trs;
u8 tgt_ind;
u8 frm_tgt_ind;
u8 addr_sel;
u8 addr_msk;
/* rx agg */
struct phl_tid_ampdu_rx *tid_rx[8]; /* TID_MAX_NUM */
_os_lock tid_rx_lock; /* guarding @tid_rx */
_os_event comp_sync; /* reorder timer completion event */
_os_timer reorder_timer; /* reorder timer for all @tid_rx of the
* stainfo */
/* TODO: add missing part */
/*mlme protocol or PHY related CAP*/
struct protocol_cap_t asoc_cap;
enum rtw_protect_mode protect;
/*security related*/
u8 wapi;
u8 sec_mode;
/*
* STA powersave; these could be implemented as bit flags, but there are no
* corresponding atomic bit operations available on Windows.
*/
_os_atomic ps_sta; /* the sta is in PS mode or not */
struct rtw_hal_stainfo_t *hal_sta;
#ifdef CONFIG_PHL_TXSC
struct phl_txsc_entry phl_txsc[PHL_TXSC_ENTRY_NUM];
#endif
struct rtw_rx_bcn_info bcn_i;
void *core_data; /* Track back to counter part in core layer */
};
#define WL_FUNC_P2P BIT0
#define WL_FUNC_MIRACAST BIT1
#define WL_FUNC_TDLS BIT2
#define WL_FUNC_FTM BIT3
#define WL_FUNC_BIT_NUM 4
/* HW MAC capability*/
#define HW_SUP_DBCC BIT0
#define HW_SUP_AMSDU BIT1
#define HW_SUP_TCP_TX_CHKSUM BIT2
#define HW_SUP_TCP_RX_CHKSUM BIT3
#define HW_SUP_TXPKT_CONVR BIT4
#define HW_SUP_RXPKT_CONVR BIT5
#define HW_SUP_MULTI_BSSID BIT6
#define HW_SUP_OFDMA BIT7
#define HW_SUP_CHAN_INFO BIT8
#define HW_SUP_TSSI BIT9
#define HW_SUP_TANK_K BIT10
/*BUS Section CAP */
#define HW_SUP_PCIE_PLFH BIT20 /*payload from host*/
#define HW_SUP_USB_MULTI_FUN BIT21
#define HW_SUP_SDIO_MULTI_FUN BIT22
/* Beamform CAP */
#define HW_CAP_BF_NON_SUPPORT 0
#define HW_CAP_BFEE_HT_SU BIT(0)
#define HW_CAP_BFER_HT_SU BIT(1)
#define HW_CAP_BFEE_VHT_SU BIT(2)
#define HW_CAP_BFER_VHT_SU BIT(3)
#define HW_CAP_BFEE_VHT_MU BIT(4)
#define HW_CAP_BFER_VHT_MU BIT(5)
#define HW_CAP_BFEE_HE_SU BIT(6)
#define HW_CAP_BFER_HE_SU BIT(7)
#define HW_CAP_BFEE_HE_MU BIT(8)
#define HW_CAP_BFER_HE_MU BIT(9)
#define HW_CAP_HE_NON_TB_CQI BIT(10)
#define HW_CAP_HE_TB_CQI BIT(11)
#define RTW_HW_CAP_ULRU_AUTO 0
#define RTW_HW_CAP_ULRU_DISABLE 1
#define RTW_HW_CAP_ULRU_ENABLE 2
/* STBC CAP */
#define HW_CAP_STBC_HT_TX BIT(0)
#define HW_CAP_STBC_VHT_TX BIT(1)
#define HW_CAP_STBC_HE_TX BIT(2)
#define HW_CAP_STBC_HE_TX_GT_80M BIT(3)
#define HW_CAP_STBC_HT_RX BIT(4)
#define HW_CAP_STBC_VHT_RX BIT(5)
#define HW_CAP_STBC_HE_RX BIT(6)
#define HW_CAP_STBC_HE_RX_GT_80M BIT(7)
struct hal_spec_t {
char *ic_name;
u16 macid_num;
u8 sec_cam_ent_num;
u8 sec_cap;
u8 wow_cap;
u8 rfpath_num_2g:4; /* used for tx power index path */
u8 rfpath_num_5g:4; /* used for tx power index path */
u8 rf_reg_path_num;
u8 max_tx_cnt;
u8 tx_nss_num:4;
u8 rx_nss_num:4;
u8 band_cap; /* value of BAND_CAP_XXX */
u8 bw_cap; /* value of BW_CAP_XXX */
u8 port_num;
u8 wmm_num;
u8 proto_cap; /* value of PROTO_CAP_XXX */
u8 wl_func; /* value of WL_FUNC_XXX */
/********* xmit ************/
/********* recv ************/
u8 rx_bd_info_sz;
u16 rx_tag[2];
#ifdef CONFIG_USB_HCI
u8 max_bulkin_num;
u8 max_bulkout_num;
#endif
#ifdef CONFIG_PCI_HCI
u16 txbd_multi_tag;
u8 txbd_upd_lmt;
#ifdef RTW_WKARD_BUSCAP_IN_HALSPEC
u8 phyaddr_num;
#endif
#endif
u8 cts2_thres_en;
u16 cts2_thres;
/********* beamformer ************/
u8 max_csi_buf_su_nr;
u8 max_csi_buf_mu_nr;
u8 max_bf_ent_nr;
u8 max_su_sta_nr;
u8 max_mu_sta_nr;
};
#define phl_get_hci_type(_phlcom) (_phlcom->hci_type)
#define phl_get_ic_spec(_phlcom) (&_phlcom->hal_spec)
#define phl_get_fw_buf(_phlcom) (_phlcom->fw_info.ram_buff)
#define phl_get_fw_size(_phlcom) (_phlcom->fw_info.ram_size)
enum rtw_drv_mode {
RTW_DRV_MODE_NORMAL = 0,
RTW_DRV_MODE_EQC = 1,
RTW_DRV_MODE_HIGH_THERMAL = 2,
/* 11~20 for MP submodule section*/
RTW_DRV_MODE_MP_SMDL_START = 11,
RTW_DRV_MODE_MP = 11,
RTW_DRV_MODE_HOMOLOGATION = 12,
RTW_DRV_MODE_MP_SMDL_END = 20,
/* 21~30 for FPGA submodule section*/
RTW_DRV_MODE_FPGA_SMDL_START = 21,
RTW_DRV_MODE_FPGA_SMDL_END = 30,
/* 31~60 for VERIFY submodule section*/
RTW_DRV_MODE_VERIFY_SMDL_START = 31,
RTW_DRV_MODE_VERIFY_SMDL_END = 60,
/* 61~80 for TOOL submodule section*/
RTW_DRV_MODE_TOOL_SMDL_START = 61,
RTW_DRV_MODE_TOOL_SMDL_END = 80,
/* Fixed Max Value*/
RTW_DRV_MODE_MAX = 255
};
struct rtw_evt_info_t {
_os_lock evt_lock;
enum rtw_phl_evt evt_bitmap;
};
// WiFi FW
struct rtw_fw_info_t {
u8 fw_en;
u8 fw_src;
u8 fw_type;
u8 dlram_en;
u8 dlrom_en;
u8 *rom_buff;
u32 rom_addr;
u32 rom_size;
char rom_path[256];
u8 *ram_buff;
u32 ram_size;
char ram_path[256];
u8 *buf;
u32 buf_size;
u8 *wow_buf;
u32 wow_buf_size;
u8 *sym_buf;
u32 sym_buf_size;
};
#ifdef CONFIG_PHL_DFS
enum dfs_regd_t {
DFS_REGD_UNKNOWN = 0,
DFS_REGD_FCC = 1,
DFS_REGD_JAP = 2,
DFS_REGD_ETSI = 3,
};
struct rtw_dfs_t {
u8 region_domain;
bool dfs_enabled;
};
#endif
#ifdef CONFIG_PHL_CHANNEL_INFO
#define CHAN_INFO_MAX_SIZE 65535
#define MAX_CHAN_INFO_PKT_KEEP 2
#define CHAN_INFO_PKT_TOTAL (MAX_CHAN_INFO_PKT_KEEP + 1)
struct csi_header_t {
u8 mac_addr[6]; /* mdata: u8 ta[6]? */
u32 hw_assigned_timestamp; /* mdata: u32 freerun_cnt */
u8 channel; /* Drv define */
u8 bandwidth; /* mdata: u8 bw */
u16 rx_data_rate; /* mdata: u16 rx_rate */
u8 nc; /* ch_rpt_hdr_info */
u8 nr; /* ch_rpt_hdr_info */
u16 num_sub_carrier; /* Drv define*/
u8 num_bit_per_tone; /* Drv define per I/Q */
u8 avg_idle_noise_pwr; /* ch_rpt_hdr_info */
u8 evm[2]; /* ch_rpt_hdr_info */
u8 rssi[2]; /* phy_info_rpt */
u32 csi_data_length; /* ch_rpt_hdr_info */
u8 rxsc; /* phy_info_rpt */
u8 ch_matrix_report; /* mdata: u8 get_ch_info */
u8 csi_valid; /* ch_rpt_hdr_info */
};
struct chan_info_t {
_os_list list;
u8* chan_info_buffer;
u32 length;
struct csi_header_t csi_header;
};
struct rx_chan_info_pool {
struct chan_info_t channl_info_pkt[CHAN_INFO_PKT_TOTAL];
_os_list idle;
_os_list busy;
_os_lock idle_lock; /* spinlock */
_os_lock busy_lock; /* spinlock */
u32 idle_cnt;
u32 busy_cnt;
};
#endif /* CONFIG_PHL_CHANNEL_INFO */
#ifdef CONFIG_MCC_SUPPORT
#define BT_SEG_NUM 2
#define SLOT_NUM 4
#define MIN_TDMRA_SLOT_NUM 2
#define NONSPECIFIC_SETTING 0xff
/*Export to core layer. Phl get the judgement of slot mode*/
enum rtw_phl_mcc_coex_mode {
RTW_PHL_MCC_COEX_MODE_NONE = 0,
RTW_PHL_MCC_COEX_MODE_BT_MASTER,
RTW_PHL_MCC_COEX_MODE_WIFI_MASTER,
RTW_PHL_MCC_COEX_MODE_BT_WIFI_BALANCE
};
enum rtw_phl_tdmra_wmode {
RTW_PHL_TDMRA_WMODE_NONE = 0,
RTW_PHL_TDMRA_AP_CLIENT_WMODE,
RTW_PHL_TDMRA_2CLIENTS_WMODE,
RTW_PHL_TDMRA_AP_WMODE,
RTW_PHL_TDMRA_UNKNOWN_WMODE
};
enum rtw_phl_mcc_dbg_type {
MCC_DBG_NONE = 0,
MCC_DBG_STATE,
MCC_DBG_OP_MODE,
MCC_DBG_COEX_MODE,
MCC_DBG_BT_INFO,
MCC_DBG_EN_INFO
};
enum rtw_phl_mcc_state {
MCC_NONE = 0,
MCC_CFG_EN_INFO,
MCC_TRIGGER_FW_EN,
MCC_FW_EN_FAIL,
MCC_RUNING,
MCC_TRIGGER_FW_DIS,
MCC_FW_DIS_FAIL,
MCC_STOP
};
enum rtw_phl_mcc_dur_lim_tag {
RTW_MCC_DUR_LIM_NONE = 0,
RTW_MCC_DUR_LIM_NOA
};
/*Export to core layer and hal layer. Phl gets the c2h report mode and configures it to halmac*/
enum rtw_phl_mcc_rpt {
RTW_MCC_RPT_OFF = 0,
RTW_MCC_RPT_FAIL_ONLY,
RTW_MCC_RPT_ALL
};
/*Export to core layer. Phl get switch ch setting of role from core layer*/
struct rtw_phl_mcc_setting_info {
struct rtw_wifi_role_t *wrole;
u8 role_map;/*the wifi role map in operating mcc */
u8 tx_null_early;
u16 dur; /*core specific duration in a period of 100 ms */
bool en_fw_mcc_log;
u8 fw_mcc_log_lv;/* fw mcc log level */
};
/*Export to core layer. Core get NOA info to update p2p beacon*/
struct rtw_phl_mcc_noa {
struct rtw_wifi_role_t *wrole;
u32 start_t_h;
u32 start_t_l;
u16 dur;
u16 interval;
u8 cnt;
};
struct rtw_phl_mcc_ops {
void *priv; /* ops private, define by core layer*/
int (*mcc_update_noa)(void *priv, struct rtw_phl_mcc_noa *param);
int (*mcc_get_setting)(void *priv, struct rtw_phl_mcc_setting_info *param);
};
/*
* Export to phl layer and hal layer.
* Record the debug info.
*/
struct rtw_phl_mcc_dbg_slot_info {
bool bt_role;
u16 dur;
u16 ch;
u16 macid;
};
struct rtw_phl_mcc_dbg_hal_info {
u8 slot_num;
struct rtw_phl_mcc_dbg_slot_info dbg_slot_i[SLOT_NUM];
bool btc_in_group;
};
struct rtw_phl_mcc_macid_bitmap {
u32 *bitmap;
u8 len;
};
struct rtw_phl_mcc_sync_tsf_info {
u8 sync_en;
u16 source;
u16 target;
u16 offset;
};
struct rtw_phl_mcc_dur_lim_info {
bool enable;
enum rtw_phl_mcc_dur_lim_tag tag;
u16 max_tob;
u16 max_toa;
u16 max_dur;
};
struct rtw_phl_mcc_dur_info {
u16 dur;
struct rtw_phl_mcc_dur_lim_info dur_limit;
};
struct rtw_phl_mcc_policy_info {
u8 c2h_rpt;
u8 tx_null_early;
u8 dis_tx_null;
u8 in_curr_ch;
u8 dis_sw_retry;
u8 sw_retry_count;
struct rtw_phl_mcc_dur_info dur_info;
u8 rfk_chk;
u8 protect_bcn;
u8 courtesy_en;
u8 courtesy_num;
u8 courtesy_target;
};
struct rtw_phl_mcc_role {
struct rtw_wifi_role_t *wrole;
struct rtw_phl_mcc_macid_bitmap used_macid;
struct rtw_chan_def *chandef;
struct rtw_phl_mcc_policy_info policy;
u16 macid;
u16 bcn_intvl;
bool bt_role;
u8 group;
};
/*
* @c_en: Enable courtesy function
* @c_num: the time slot of src_role replaced by tgt_role
*/
struct rtw_phl_mcc_courtesy {
bool c_en;
bool c_num;
struct rtw_phl_mcc_role *src_role;
struct rtw_phl_mcc_role *tgt_role;
};
/*
* @slot: duration, unit: TU
* @bt_role: True: bt role, False: Wifi role
* @mrole: mcc role info for Wifi Role
*/
struct rtw_phl_mcc_slot_info {
u16 slot;
bool bt_role;
struct rtw_phl_mcc_role *mrole;
};
/*
* @slot_num: total slot num(Wifi+BT)
* @bt_slot_num: total BT slot num
* | Dur1 | Dur2 |
* bcn bcn
* |tob_r | toa_r|tob_a | toa_a|
*/
struct rtw_phl_mcc_pattern {
u8 slot_num;
u8 bt_slot_num;
struct rtw_phl_mcc_role *role_ref;
struct rtw_phl_mcc_role *role_ano;
s16 tob_r;
s16 toa_r;
s16 tob_a;
s16 toa_a;
u16 bcns_offset;
u16 calc_fail;
/**
* |tob_r|toa_r|
* -----------<d_r_d_a_spacing>-----------
* |tob_a|toa_a|
**/
u16 d_r_d_a_spacing_max;
struct rtw_phl_mcc_courtesy courtesy_i;
/*record slot order for X wifi slot + Y bt slot*/
struct rtw_phl_mcc_slot_info slot_order[SLOT_NUM];
};
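/*
 * Illustrative sketch (not part of the original header): per the diagram
 * above, a role's slot duration appears to be its time offset before the
 * beacon plus its time offset after the beacon (in TU), i.e.
 * dur_ref = tob_r + toa_r and dur_another = tob_a + toa_a. This reading is
 * an assumption and the function/variable names below are hypothetical.
 */
#if 0
static void rtw_phl_mcc_pattern_example(const struct rtw_phl_mcc_pattern *p)
{
	s16 dur_ref = p->tob_r + p->toa_r;	/* Dur1 */
	s16 dur_ano = p->tob_a + p->toa_a;	/* Dur2 */

	(void)dur_ref;
	(void)dur_ano;
}
#endif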
/*
* Enable info for mcc
* @ref_role_idx: the index of the reference role
* @mrole_map: the map of mcc roles in use
* @mrole_num: the number of mcc roles in use
* @group: assigned by halmac mcc, the group resource of the fw feature; the phl layer ignores it.
* fw mcc can handle different slot patterns, and the group is the id of the slot pattern.
* @tsf_high, tsf_low: Start TSF
* @tsf_high_l, tsf_low_l: Limitation of Start TSF
* @dbg_hal_i: Debug info for hal mcc
*/
struct rtw_phl_mcc_en_info {
struct rtw_phl_mcc_role mcc_role[MCC_ROLE_NUM];
struct rtw_phl_mcc_sync_tsf_info sync_tsf_info;
struct rtw_phl_mcc_pattern m_pattern;
u8 ref_role_idx;
u8 mrole_map;
u8 mrole_num;
u8 group;
u16 mcc_intvl;
u32 tsf_high;
u32 tsf_low;
u32 tsf_high_l;
u32 tsf_low_l;
struct rtw_phl_mcc_dbg_hal_info dbg_hal_i;
};
/*
* Bt info
* @bt_dur: bt slot
* @bt_seg: segment bt slot
* @bt_seg_num: segment num
* @add_bt_role: if add_bt_role = true, we need to add bt slot to fw
*/
struct rtw_phl_mcc_bt_info {
u16 bt_dur;
u16 bt_seg[BT_SEG_NUM];
u8 bt_seg_num;
bool add_bt_role;
};
enum rtw_phl_mcc_chk_inprocess_type {
RTW_PHL_MCC_CHK_INPROGRESS = 0,
RTW_PHL_MCC_CHK_INPROGRESS_SINGLE_CH,
RTW_PHL_MCC_CHK_INPROGRESS_MULTI_CH,
RTW_PHL_MCC_CHK_MAX,
};
enum mr_coex_trigger {
MR_COEX_TRIG_BY_BT,
MR_COEX_TRIG_BY_LINKING,
MR_COEX_TRIG_BY_DIS_LINKING,
MR_COEX_TRIG_BY_CHG_SLOT,
MR_COEX_TRIG_BY_SCAN,
MR_COEX_TRIG_BY_ECSA,
MR_COEX_TRIG_BY_CHG_OP_CHDEF,
MR_COEX_TRIG_MAX,
};
#endif /* CONFIG_MCC_SUPPORT */
/*multi-roles control components*/
enum mr_op_mode {
MR_OP_NON,
MR_OP_SCC,
MR_OP_MCC,
MR_OP_MAX,
};
enum mr_op_type {
MR_OP_TYPE_NONE,
MR_OP_TYPE_STATION_ONLY,
MR_OP_TYPE_AP_ONLY,
MR_OP_TYPE_STATION_AP,
MR_OP_TYPE_MAX,
};
struct mr_info {
u8 sta_num;
u8 ld_sta_num;
u8 lg_sta_num; /* WIFI_STATION_STATE && WIFI_UNDER_LINKING */
u8 ap_num;
u8 ld_ap_num; /*&& asoc_sta_count > 2*/
u8 monitor_num;
u8 p2p_device_num;
u8 p2p_gc_num;
u8 p2p_go_num;
#ifdef CONFIG_PHL_TDLS
u8 ld_tdls_num; /* phl_role->type == PHL_RTYPE_TDLS */
#endif
#if 0
#ifdef CONFIG_AP_MODE
u8 starting_ap_num; /*WIFI_FW_AP_STATE*/
#endif
u8 adhoc_num; /* (WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE) && WIFI_ASOC_STATE */
u8 ld_adhoc_num; /* (WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE) && WIFI_ASOC_STATE && asoc_sta_count > 2 */
#ifdef CONFIG_RTW_MESH
u8 mesh_num; /* WIFI_MESH_STATE && WIFI_ASOC_STATE */
u8 ld_mesh_num; /* WIFI_MESH_STATE && WIFI_ASOC_STATE && asoc_sta_count > 2 */
#endif
#endif
};
enum mr_coex_mode {
MR_COEX_MODE_NONE = 0,
MR_COEX_MODE_2GSCC_1AP_1STA_BTC = 1,
MR_COEX_MODE_TDMRA = 2
};
/*export to core layer*/
struct mr_query_info {
struct mr_info cur_info;
enum mr_op_mode op_mode;
enum mr_op_type op_type;
};
struct hw_band_ctl_t {
_os_lock lock;
u8 id;
u8 port_map; /*used port_idx*/
u8 role_map; /*used role_idx*/
u8 wmm_map;
struct mr_info cur_info;
enum mr_op_mode op_mode;
enum mr_op_type op_type;
enum phl_hw_port tsf_sync_port;
struct phl_queue chan_ctx_queue;/*struct rtw_chan_ctx*/
enum mr_coex_mode coex_mode;
#ifdef CONFIG_MCC_SUPPORT
void *mcc_info; /*struct phl_mcc_info*/
#endif
};
#define MAX_BAND_NUM 2
struct rtw_hal_com_t;
#ifdef CONFIG_PHL_P2PPS
struct rtw_phl_noa {
struct rtw_wifi_role_t *wrole;
enum p2pps_trig_tag tag;
u32 start_t_h;
u32 start_t_l;
u16 dur;
u8 cnt;
u16 interval;
};
#endif
struct rtw_phl_mr_ops {
void *priv; /* ops private, defined by core layer*/
#ifdef CONFIG_PHL_P2PPS
int (*phl_mr_update_noa)(void *priv, struct rtw_phl_noa *param);
#endif
#ifdef CONFIG_MCC_SUPPORT
struct rtw_phl_mcc_ops *mcc_ops;
#endif
};
struct mr_ctl_t {
struct rtw_hal_com_t *hal_com;
_os_lock lock;
struct hw_band_ctl_t band_ctrl[MAX_BAND_NUM];
struct phl_bk_module_ops bk_ops;
u8 role_map;
bool is_sb;
struct rtw_phl_mr_ops mr_ops;
#ifdef CONFIG_MCC_SUPPORT
u8 init_mcc;
void *com_mcc;/*struct phl_com_mcc_info*/
#endif
};
enum rtw_rssi_type {
RTW_RSSI_DATA_ACAM,
RTW_RSSI_DATA_ACAM_A1M,
RTW_RSSI_DATA_OTHER,
RTW_RSSI_CTRL_ACAM,
RTW_RSSI_CTRL_ACAM_A1M,
RTW_RSSI_CTRL_OTHER,
RTW_RSSI_MGNT_ACAM,
RTW_RSSI_MGNT_ACAM_A1M,
RTW_RSSI_MGNT_OTHER,
RTW_RSSI_UNKNOWN,
RTW_RSSI_TYPE_MAX
};
#define PHL_MAX_RSSI 110
#define PHL_RSSI_MAVG_NUM 16
#define UPDATE_MA_RSSI(_RSSI, _TYPE, _VAL) \
do { \
u8 oldest_rssi = 0; \
if(_RSSI->ma_rssi_ele_idx[_TYPE] < PHL_RSSI_MAVG_NUM) { \
oldest_rssi = _RSSI->ma_rssi_ele[_TYPE][\
_RSSI->ma_rssi_ele_idx[_TYPE]]; \
_RSSI->ma_rssi_ele[_TYPE][_RSSI->ma_rssi_ele_idx[_TYPE]] = \
((_VAL > PHL_MAX_RSSI) ? PHL_MAX_RSSI : _VAL ); \
} else { \
_RSSI->ma_rssi_ele_idx[_TYPE] = 0; \
oldest_rssi = _RSSI->ma_rssi_ele[_TYPE][0]; \
_RSSI->ma_rssi_ele[_TYPE][0] = \
((_VAL > PHL_MAX_RSSI) ? PHL_MAX_RSSI : _VAL ); \
} \
_RSSI->ma_rssi_ele_sum[_TYPE] -= oldest_rssi;\
_RSSI->ma_rssi_ele_sum[_TYPE] += \
((_VAL > PHL_MAX_RSSI) ? PHL_MAX_RSSI : _VAL ); \
_RSSI->ma_rssi_ele_idx[_TYPE]++; \
if(_RSSI->ma_rssi_ele_cnt[_TYPE] < PHL_RSSI_MAVG_NUM) \
_RSSI->ma_rssi_ele_cnt[_TYPE]++; \
_RSSI->ma_rssi[_TYPE] = (u8)(_RSSI->ma_rssi_ele_sum[_TYPE] / \
_RSSI->ma_rssi_ele_cnt[_TYPE]);\
} while (0)
#define PHL_TRANS_2_RSSI(X) (X >> 1)
struct rtw_phl_rssi_stat {
_os_lock lock;
u8 ma_rssi_ele_idx[RTW_RSSI_TYPE_MAX];
u8 ma_rssi_ele_cnt[RTW_RSSI_TYPE_MAX]; /* maximum : PHL_RSSI_MAVG_NUM */
u8 ma_rssi_ele[RTW_RSSI_TYPE_MAX][PHL_RSSI_MAVG_NUM]; /* rssi element for moving average */
u32 ma_rssi_ele_sum[RTW_RSSI_TYPE_MAX];
u8 ma_rssi[RTW_RSSI_TYPE_MAX]; /* moving average : 0~PHL_MAX_RSSI (dBm = rssi - PHL_MAX_RSSI) */
};
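/*
 * Illustrative sketch (not part of the driver): feeding one RX data frame
 * sample into the 16-entry moving average kept in rtw_phl_rssi_stat. The
 * helper name and the raw half-dB input are assumptions for illustration;
 * the macro usage follows UPDATE_MA_RSSI/PHL_TRANS_2_RSSI defined above.
 */
#if 0
static void rssi_stat_feed_sample_example(struct rtw_phl_rssi_stat *stat,
					  u8 raw_half_db)
{
	/* PHL_TRANS_2_RSSI maps the raw value to the 0..PHL_MAX_RSSI scale */
	u8 rssi = PHL_TRANS_2_RSSI(raw_half_db);

	/* values above PHL_MAX_RSSI are clamped inside the macro */
	UPDATE_MA_RSSI(stat, RTW_RSSI_DATA_ACAM, rssi);

	/* stat->ma_rssi[RTW_RSSI_DATA_ACAM] now holds the running average;
	 * dBm can be derived as (ma_rssi - PHL_MAX_RSSI) per the comment
	 * on rtw_phl_rssi_stat above */
}
#endif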
#define PHL_MAX_PPDU_CNT 8
#define PHL_MAX_PPDU_STA_NUM 4
struct rtw_phl_ppdu_sts_sta_ent {
u8 vld;
/*u8 rssi;*/
u16 macid;
};
struct rtw_phl_ppdu_phy_info {
bool is_valid;
u8 rssi; /*signal power : 0 - PHL_MAX_RSSI, rssi dbm = PHL_MAX_RSSI - value*/
u8 rssi_path[RTW_PHL_MAX_RF_PATH];/*PATH A, PATH B ... PATH D*/
u8 ch_idx;
u8 tx_bf;
u8 frame_type; /* type + subtype */
};
#ifdef CONFIG_PHY_INFO_NTFY
struct rtw_phl_ppdu_sts_ntfy {
bool vld;
u8 frame_type;
u8 src_mac_addr[MAC_ADDRESS_LENGTH];
struct rtw_phl_ppdu_phy_info phy_info;
};
#endif
struct rtw_phl_ppdu_sts_ent {
/* from normal data */
u8 frame_type;
u8 addr_cam_vld;
u8 crc32;
u8 ppdu_type;
u16 rx_rate;
u8 src_mac_addr[MAC_ADDRESS_LENGTH];
/* from ppdu status */
bool valid;
bool phl_done;
u8 usr_num;
u32 freerun_cnt;
struct rtw_phl_ppdu_phy_info phy_info;
struct rtw_phl_ppdu_sts_sta_ent sta[PHL_MAX_PPDU_STA_NUM];
#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
/* for ppdu status per pkt */
struct phl_queue frames;
#endif
};
struct rtw_phl_ppdu_sts_info {
struct rtw_phl_ppdu_sts_ent sts_ent[HW_BAND_MAX][PHL_MAX_PPDU_CNT];
u8 cur_rx_ppdu_cnt[HW_BAND_MAX];
bool en_ppdu_sts[HW_BAND_MAX];
bool latest_rx_is_psts[HW_BAND_MAX];
#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
bool en_psts_per_pkt;
bool psts_ampdu;
#define RTW_PHL_PSTS_FLTR_MGNT BIT(RTW_FRAME_TYPE_MGNT)
#define RTW_PHL_PSTS_FLTR_CTRL BIT(RTW_FRAME_TYPE_CTRL)
#define RTW_PHL_PSTS_FLTR_DATA BIT(RTW_FRAME_TYPE_DATA)
#define RTW_PHL_PSTS_FLTR_EXT_RSVD BIT(RTW_FRAME_TYPE_EXT_RSVD)
u8 ppdu_sts_filter;
u8 en_fake_psts;
u8 cur_ppdu_cnt[HW_BAND_MAX];
#endif
#ifdef CONFIG_PHY_INFO_NTFY
#define MAX_PSTS_MSG_AGGR_NUM 10
struct rtw_phl_ppdu_sts_ntfy msg_aggr_buf[MAX_PSTS_MSG_AGGR_NUM];
u8 msg_aggr_cnt;
#endif
};
struct rtw_phl_gid_pos_tbl {
#define RTW_VHT_GID_MGNT_FRAME_GID_SZ 8
#define RTW_VHT_GID_MGNT_FRAME_POS_SZ 16
u8 gid_vld[RTW_VHT_GID_MGNT_FRAME_GID_SZ]; /* from 0 - 63 */
u8 pos[RTW_VHT_GID_MGNT_FRAME_POS_SZ]; /* 0 - 63, per 2 bit*/
};
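/*
 * Illustrative sketch (not part of the driver): reading one entry out of
 * the VHT GID/position table above. gid_vld packs 64 validity bits (one
 * per group id) and pos packs 64 two-bit user positions. The helper names
 * and the LSB-first bit ordering are assumptions for illustration.
 */
#if 0
static bool gid_tbl_is_valid_example(const struct rtw_phl_gid_pos_tbl *tbl,
				     u8 gid)
{
	/* one bit per GID, 8 GIDs per byte */
	return (tbl->gid_vld[gid >> 3] >> (gid & 0x7)) & 0x1;
}

static u8 gid_tbl_get_pos_example(const struct rtw_phl_gid_pos_tbl *tbl,
				  u8 gid)
{
	/* two bits per GID, 4 GIDs per byte */
	return (tbl->pos[gid >> 2] >> ((gid & 0x3) * 2)) & 0x3;
}
#endif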
struct rtw_iot_t {
u32 id;
};
#ifdef CONFIG_PHL_THERMAL_PROTECT
enum phl_thermal_protect_action{
PHL_THERMAL_PROTECT_ACTION_NONE = 0,
PHL_THERMAL_PROTECT_ACTION_LEVEL1 = 1,
PHL_THERMAL_PROTECT_ACTION_LEVEL2 = 2,
};
#endif
struct rtw_phl_evt_ops;
struct rtw_phl_com_t {
struct rtw_wifi_role_t wifi_roles[MAX_WIFI_ROLE_NUMBER];
struct mr_ctl_t mr_ctrl; /*multi wifi_role control module*/
struct rtw_phl_evt_ops evt_ops;
enum rtw_hci_type hci_type;
enum rtw_drv_mode drv_mode;/*normal or mp mode*/
enum rtw_dev_state dev_state;
struct hal_spec_t hal_spec;
struct role_sw_cap_t role_sw_cap; /* SW control capability of role for any purpose */
struct protocol_cap_t proto_sw_cap[2]; /* SW control wifi protocol capability for any purpose */
struct phy_sw_cap_t phy_sw_cap[2]; /* SW control phy capability for any purpose */
struct phy_cap_t phy_cap[2]; /* final capability of phy (intersection of sw/hw) */
struct dev_cap_t dev_sw_cap;
struct dev_cap_t dev_cap; /* final capability of device (intersection of sw/hw) */
struct bus_sw_cap_t bus_sw_cap; /* SW controlled bus capability */
struct rtw_fw_info_t fw_info;
struct rtw_evt_info_t evt_info;
struct rtw_stats phl_stats;
#ifdef CONFIG_PHL_DFS
struct rtw_dfs_t dfs_info;
#endif
struct rtw_iot_t id;
/* Flags to control/check RX packets */
bool append_fcs;
bool accept_icv_err;
u8 tx_nss; /*tx Spatial Streams - GET_HAL_TX_NSS, get_min from registry and hal_spec*/
u8 rx_nss; /*rx Spatial Streams - GET_HAL_RX_NSS, get_min from registry and hal_spec*/
u8 rf_type; /*enum rf_type , is RF_PATH - GET_HAL_RFPATH*/
u8 rf_path_num; /*GET_HAL_RFPATH_NUM*/
u8 regulation; /*regulation*/
u8 edcca_mode;
#ifdef CONFIG_PHL_CHANNEL_INFO
struct rx_chan_info_pool *chan_info_pool;
struct chan_info_t *chan_info; /* Handle channel info packet */
#endif /* CONFIG_PHL_CHANNEL_INFO */
void *p2pps_info;
struct rtw_phl_ppdu_sts_info ppdu_sts_info;
struct rtw_phl_rssi_stat rssi_stat;
#ifdef CONFIG_PHL_THERMAL_PROTECT
enum phl_thermal_protect_action thermal_protect_action;
#endif
void *test_mgnt;
void *phl_priv; /* pointer to phl_info */
void *drv_priv;
#ifdef RTW_WKARD_BFEE_SET_AID
u8 is_in_lps;
#endif
};
struct phl_sec_param_h {
u8 keyid;
u8 enc_type;
u8 key_type;
u8 key_len;
u8 spp;
};
#define PHL_MAX_AGG_WSIZE 32
struct mp_usr_sw_tx_gen_in {
u32 data_rate : 9;
u32 mcs : 6;
u32 mpdu_len : 14;
u32 n_mpdu : 9;
u32 fec : 1;
u32 dcm : 1;
u32 rsvd0 : 1;
u32 aid : 12;
u32 scrambler_seed : 8; // rand (1~255)
u32 random_init_seed : 8; // rand (1~255)
u32 rsvd1 : 4;
u32 apep : 22;
u32 ru_alloc : 8;
u32 rsvd2 : 2;
u32 nss : 4;
u32 txbf : 1;
u32 pwr_boost_db : 5;
u32 rsvd3 : 22;
};
struct mp_sw_tx_param_t {
u32 dbw : 2; //0:BW20, 1:BW40, 2:BW80, 3:BW160/BW80+80
u32 source_gen_mode : 2;
u32 locked_clk : 1;
u32 dyn_bw : 1;
u32 ndp_en : 1;
u32 long_preamble_en : 1; //bmode
u32 stbc : 1;
u32 gi : 2; //0:0.4,1:0.8,2:1.6,3:3.2
u32 tb_l_len : 12;
u32 tb_ru_tot_sts_max : 3;
u32 vht_txop_not_allowed : 1;
u32 tb_disam : 1;
u32 doppler : 2;
u32 he_ltf_type : 2;//0:1x,1:2x,2:4x
u32 ht_l_len : 12;
u32 preamble_puncture : 1;
u32 he_mcs_sigb : 3;//0~5
u32 he_dcm_sigb : 1;
u32 he_sigb_compress_en : 1;
u32 max_tx_time_0p4us : 14;
u32 ul_flag : 1;
u32 tb_ldpc_extra : 1;
u32 bss_color : 6;
u32 sr : 4;
u32 beamchange_en : 1;
u32 he_er_u106ru_en : 1;
u32 ul_srp1 : 4;
u32 ul_srp2 : 4;
u32 ul_srp3 : 4;
u32 ul_srp4 : 4;
u32 mode : 2;
u32 group_id : 6;
u32 ppdu_type : 4;//0: bmode,1:Legacy,2:HT_MF,3:HT_GF,4:VHT,5:HE_SU,6:HE_ER_SU,7:HE_MU,8:HE_TB
u32 txop : 7;
u32 tb_strt_sts : 3;
u32 tb_pre_fec_padding_factor : 2;
u32 cbw : 2;
u32 txsc : 4;
u32 tb_mumimo_mode_en : 1;
u32 rsvd1 : 3;
u8 nominal_t_pe : 2; // def = 2
u8 ness : 2; // def = 0
u8 rsvd2 : 4;
u8 n_user;
u16 tb_rsvd : 9;//def = 0
u16 rsvd3 : 7;
struct mp_usr_sw_tx_gen_in usr[4];
};
struct mp_usr_plcp_gen_in {
u32 mcs : 6;
u32 mpdu_len : 14;
u32 n_mpdu : 9;
u32 fec : 1;
u32 dcm : 1;
u32 rsvd0 : 1;
u32 aid : 12;
u32 scrambler_seed : 8; // rand (1~255)
u32 random_init_seed : 8; // rand (1~255)
u32 rsvd1 : 4;
u32 apep : 22;
u32 ru_alloc : 8;
u32 rsvd2 : 2;
u32 nss : 4;
u32 txbf : 1;
u32 pwr_boost_db : 5;
u32 rsvd3 : 22;
};
enum pkt_ofld_type {
PKT_TYPE_PROBE_RSP = 0,
PKT_TYPE_PS_POLL = 1,
PKT_TYPE_NULL_DATA = 2,
PKT_TYPE_QOS_NULL = 3,
PKT_TYPE_CTS2SELF = 4,
PKT_TYPE_ARP_RSP = 5,
PKT_TYPE_NDP = 6,
PKT_TYPE_EAPOL_KEY = 7,
PKT_TYPE_SA_QUERY = 8,
PKT_TYPE_REALWOW_KAPKT = 9, /* RealWoW Keep Alive Packet */
PKT_TYPE_REALWOW_ACK = 10, /* RealWoW Ack Pattern */
PKT_TYPE_REALWOW_WP = 11, /* RealWoW Wakeup Pattern */
PKT_OFLD_TYPE_MAX,
};
struct mp_plcp_param_t {
u32 dbw : 2; //0:BW20, 1:BW40, 2:BW80, 3:BW160/BW80+80
u32 source_gen_mode : 2;
u32 locked_clk : 1;
u32 dyn_bw : 1;
u32 ndp_en : 1;
u32 long_preamble_en : 1; //bmode
u32 stbc : 1;
u32 gi : 2; //0:0.4,1:0.8,2:1.6,3:3.2
u32 tb_l_len : 12;
u32 tb_ru_tot_sts_max : 3;
u32 vht_txop_not_allowed : 1;
u32 tb_disam : 1;
u32 doppler : 2;
u32 he_ltf_type : 2;//0:1x,1:2x,2:4x
u32 ht_l_len : 12;
u32 preamble_puncture : 1;
u32 he_mcs_sigb : 3;//0~5
u32 he_dcm_sigb : 1;
u32 he_sigb_compress_en : 1;
u32 max_tx_time_0p4us : 14;
u32 ul_flag : 1;
u32 tb_ldpc_extra : 1;
u32 bss_color : 6;
u32 sr : 4;
u32 beamchange_en : 1;
u32 he_er_u106ru_en : 1;
u32 ul_srp1 : 4;
u32 ul_srp2 : 4;
u32 ul_srp3 : 4;
u32 ul_srp4 : 4;
u32 mode : 2;
u32 group_id : 6;
u32 ppdu_type : 4;//0: bmode,1:Legacy,2:HT_MF,3:HT_GF,4:VHT,5:HE_SU,6:HE_ER_SU,7:HE_MU,8:HE_TB
u32 txop : 7;
u32 tb_strt_sts : 3;
u32 tb_pre_fec_padding_factor : 2;
u32 cbw : 2;
u32 txsc : 4;
u32 tb_mumimo_mode_en : 1;
u32 rsvd1 : 3;
u8 nominal_t_pe : 2; // def = 2
u8 ness : 2; // def = 0
u8 rsvd2 : 4;
u8 n_user;
u16 tb_rsvd : 9;//def = 0
u16 rsvd3 : 7;
struct mp_usr_plcp_gen_in usr[4];
};
#define MP_MAC_AX_MAX_RU_NUM 4
struct mp_mac_ax_tf_depend_user_para {
u8 pref_AC: 2;
u8 rsvd: 6;
};
struct mp_mac_ax_tf_user_para {
u16 aid12: 12;
u16 ul_mcs: 4;
u8 macid;
u8 ru_pos;
u8 ul_fec_code: 1;
u8 ul_dcm: 1;
u8 ss_alloc: 6;
u8 ul_tgt_rssi: 7;
u8 rsvd: 1;
u16 rsvd2;
};
struct mp_mac_ax_tf_pkt_para {
u8 ul_bw: 2;
u8 gi_ltf: 2;
u8 num_he_ltf: 3;
u8 ul_stbc: 1;
u8 doppler: 1;
u8 ap_tx_power: 6;
u8 rsvd0: 1;
u8 user_num: 3;
u8 pktnum: 3;
u8 rsvd1: 2;
u8 pri20_bitmap;
struct mp_mac_ax_tf_user_para user[MP_MAC_AX_MAX_RU_NUM];
struct mp_mac_ax_tf_depend_user_para dep_user[MP_MAC_AX_MAX_RU_NUM];
};
struct mp_mac_ax_tf_wd_para {
u16 datarate: 9;
u16 mulport_id: 3;
u16 pwr_ofset: 3;
u16 rsvd: 1;
};
struct mp_mac_ax_f2p_test_para {
struct mp_mac_ax_tf_pkt_para tf_pkt;
struct mp_mac_ax_tf_wd_para tf_wd;
u8 mode: 2;
u8 frexch_type: 6;
u8 sigb_len;
};
struct mp_mac_ax_f2p_wd {
/* dword 0 */
u32 cmd_qsel:6;
u32 rsvd0:2;
u32 rsvd1:2;
u32 ls:1;
u32 fs:1;
u32 total_number:4;
u32 seq:8;
u32 length:8;
/* dword 1 */
u32 rsvd2;
};
struct mp_mac_ax_f2p_tx_cmd {
/* dword 0 */
u32 cmd_type:8;
u32 cmd_sub_type:8;
u32 dl_user_num:5;
u32 bw:2;
u32 tx_power:9;
/* dword 1 */
u32 fw_define:16;
u32 ss_sel_mode:2;
u32 next_qsel:6;
u32 twt_group:4;
u32 dis_chk_slp:1;
u32 ru_mu_2_su:1;
u32 dl_t_pe:2;
/* dword 2 */
u32 sigb_ch1_len:8;
u32 sigb_ch2_len:8;
u32 sigb_sym_num:6;
u32 sigb_ch2_ofs:5;
u32 dis_htp_ack:1;
u32 tx_time_ref:2;
u32 pri_user_idx:2;
/* dword 3 */
u32 ampdu_max_txtime:14;
u32 rsvd0:2;
u32 group_id:6;
u32 rsvd1:2;
u32 rsvd2:4;
u32 twt_chk_en:1;
u32 twt_port_id:3;
/* dword 4 */
u32 twt_start_time:32;
/* dword 5 */
u32 twt_end_time:32;
/* dword 6 */
u32 apep_len:12;
u32 tri_pad:2;
u32 ul_t_pe:2;
u32 rf_gain_idx:10;
u32 fixed_gain_en:1;
u32 ul_gi_ltf:3;
u32 ul_doppler:1;
u32 ul_stbc:1;
/* dword 7 */
u32 ul_mid_per:1;
u32 ul_cqi_rrp_tri:1;
u32 rsvd3:6;
u32 rsvd4:8;
u32 sigb_dcm:1;
u32 sigb_comp:1;
u32 doppler:1;
u32 stbc:1;
u32 mid_per:1;
u32 gi_ltf_size:3;
u32 sigb_mcs:3;
u32 rsvd5:5;
/* dword 8 */
u32 macid_u0:8;
u32 ac_type_u0:2;
u32 mu_sta_pos_u0:2;
u32 dl_rate_idx_u0:9;
u32 dl_dcm_en_u0:1;
u32 rsvd6:2;
u32 ru_alo_idx_u0:8;
/* dword 9 */
u32 pwr_boost_u0:5;
u32 agg_bmp_alo_u0:3;
u32 ampdu_max_txnum_u0:8;
u32 user_define_u0:8;
u32 user_define_ext_u0:8;
/* dword 10 */
u32 ul_addr_idx_u0:8;
u32 ul_dcm_u0:1;
u32 ul_fec_cod_u0:1;
u32 ul_ru_rate_u0:7;
u32 rsvd8:7;
u32 ul_ru_alo_idx_u0:8;
/* dword 11 */
u32 rsvd9:32;
/* dword 12 */
u32 macid_u1:8;
u32 ac_type_u1:2;
u32 mu_sta_pos_u1:2;
u32 dl_rate_idx_u1:9;
u32 dl_dcm_en_u1:1;
u32 rsvd10:2;
u32 ru_alo_idx_u1:8;
/* dword 13 */
u32 pwr_boost_u1:5;
u32 agg_bmp_alo_u1:3;
u32 ampdu_max_txnum_u1:8;
u32 user_define_u1:8;
u32 user_define_ext_u1:8;
/* dword 14 */
u32 ul_addr_idx_u1:8;
u32 ul_dcm_u1:1;
u32 ul_fec_cod_u1:1;
u32 ul_ru_rate_u1:7;
u32 rsvd12:7;
u32 ul_ru_alo_idx_u1:8;
/* dword 15 */
u32 rsvd13:32;
/* dword 16 */
u32 macid_u2:8;
u32 ac_type_u2:2;
u32 mu_sta_pos_u2:2;
u32 dl_rate_idx_u2:9;
u32 dl_dcm_en_u2:1;
u32 rsvd14:2;
u32 ru_alo_idx_u2:8;
/* dword 17 */
u32 pwr_boost_u2:5;
u32 agg_bmp_alo_u2:3;
u32 ampdu_max_txnum_u2:8;
u32 user_define_u2:8;
u32 user_define_ext_u2:8;
/* dword 18 */
u32 ul_addr_idx_u2:8;
u32 ul_dcm_u2:1;
u32 ul_fec_cod_u2:1;
u32 ul_ru_rate_u2:7;
u32 rsvd16:7;
u32 ul_ru_alo_idx_u2:8;
/* dword 19 */
u32 rsvd17:32;
/* dword 20 */
u32 macid_u3:8;
u32 ac_type_u3:2;
u32 mu_sta_pos_u3:2;
u32 dl_rate_idx_u3:9;
u32 dl_dcm_en_u3:1;
u32 rsvd18:2;
u32 ru_alo_idx_u3:8;
/* dword 21 */
u32 pwr_boost_u3:5;
u32 agg_bmp_alo_u3:3;
u32 ampdu_max_txnum_u3:8;
u32 user_define_u3:8;
u32 user_define_ext_u3:8;
/* dword 22 */
u32 ul_addr_idx_u3:8;
u32 ul_dcm_u3:1;
u32 ul_fec_cod_u3:1;
u32 ul_ru_rate_u3:7;
u32 rsvd20:7;
u32 ul_ru_alo_idx_u3:8;
/* dword 23 */
u32 rsvd21:32;
/* dword 24 */
u32 pkt_id_0:12;
u32 rsvd22:3;
u32 valid_0:1;
u32 ul_user_num_0:4;
u32 rsvd23:12;
/* dword 25 */
u32 pkt_id_1:12;
u32 rsvd24:3;
u32 valid_1:1;
u32 ul_user_num_1:4;
u32 rsvd25:12;
/* dword 26 */
u32 pkt_id_2:12;
u32 rsvd26:3;
u32 valid_2:1;
u32 ul_user_num_2:4;
u32 rsvd27:12;
/* dword 27 */
u32 pkt_id_3:12;
u32 rsvd28:3;
u32 valid_3:1;
u32 ul_user_num_3:4;
u32 rsvd29:12;
/* dword 28 */
u32 pkt_id_4:12;
u32 rsvd30:3;
u32 valid_4:1;
u32 ul_user_num_4:4;
u32 rsvd31:12;
/* dword 29 */
u32 pkt_id_5:12;
u32 rsvd32:3;
u32 valid_5:1;
u32 ul_user_num_5:4;
u32 rsvd33:12;
};
u8 mp_start(void *priv);
#ifdef CONFIG_DBCC_SUPPORT
enum dbcc_test_id {
DBCC_PRE_CFG,
DBCC_CFG,
DBCC_CLEAN_TXQ,
};
#endif
struct rtw_role_cmd {
struct rtw_wifi_role_t *wrole;
enum role_state rstate;
};
enum phl_btc_pkt_evt_type {
BTC_PKT_EVT_NORMAL,
BTC_PKT_EVT_DHCP,
BTC_PKT_EVT_ARP,
BTC_PKT_EVT_EAPOL,
BTC_PKT_EVT_EAPOL_START,
BTC_PKT_EVT_ADD_KEY,
BTC_PKT_EVT_MAX
};
struct rtw_pkt_evt_ntfy {
struct rtw_wifi_role_t *wrole;
enum phl_btc_pkt_evt_type type;
};
struct role_ntfy_info {
u8 role_id;
u16 macid;
enum role_state rstate;
};
struct battery_chg_ntfy_info {
bool ips_allow;
bool lps_allow;
};
struct ps_ntfy_info {
bool sync;
void *ctx;
void (*cb)(void *phl, void *hdl, void *ctx, enum rtw_phl_status stat);
};
struct set_rf_ntfy_info {
enum rtw_rf_state state_to_set;
_os_event done;
};
/**
 * rtw_phl_rainfo - structure used to query RA information
 * from the hal layer to the core/phl layer
 * @rate: current rate selected by RA, defined by the general definition enum rtw_data_rate
 * @bw: current BW, defined by the general definition enum channel_width
 * @gi_ltf: current gi_ltf, defined by the general definition enum rtw_gi_ltf
*/
struct rtw_phl_rainfo {
enum rtw_data_rate rate;
enum channel_width bw;
enum rtw_gi_ltf gi_ltf;
};
struct rtw_pcie_trx_mit_info_t {
u32 tx_timer;
u8 tx_counter;
u32 rx_timer;
u8 rx_counter;
u8 fixed_mitigation; /*no watchdog dynamic setting*/
void *priv;
};
struct rtw_env_report {
bool rpt_status; /*1 means CCX_SUCCESS,0 means fail*/
u8 clm_ratio;
u8 nhm_ratio;
u8 nhm_pwr;
u8 nhm_cca_ratio;
};
enum rtw_phl_ser_lv1_recv_step {
RTW_PHL_SER_LV1_RCVY_STEP_1 = 0,
RTW_PHL_SER_LV1_SER_RCVY_STEP_2,
/* keep last */
RTW_PHL_SER_LV1_RCVY_STEP_LAST,
RTW_PHL_SER_LV1_RCVY_STEP_MAX = RTW_PHL_SER_LV1_RCVY_STEP_LAST,
RTW_PHL_SER_LV1_RCVY_STEP_INVALID = RTW_PHL_SER_LV1_RCVY_STEP_LAST,
};
#endif /*_PHL_DEF_H_*/
| 2301_81045437/rtl8852be | phl/phl_def.h | C | agpl-3.0 | 73,501 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_DM_C_
#include "phl_headers.h"
void rtw_phl_set_edcca_mode(void *phl, enum rtw_edcca_mode mode)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
PHL_INFO("[Cert], set phl_com edcca_mode : %d !! \n", mode);
phl_info->phl_com->edcca_mode = mode;
}
enum rtw_edcca_mode rtw_phl_get_edcca_mode(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return phl_info->phl_com->edcca_mode;
}
#ifdef CONFIG_PCI_HCI
#ifdef RTW_WKARD_DYNAMIC_LTR
enum rtw_phl_status
phl_ltr_sw_trigger(struct rtw_phl_com_t *phl_com, void *hal,
enum rtw_pcie_ltr_state state)
{
enum rtw_hal_status status = RTW_HAL_STATUS_FAILURE;
struct bus_sw_cap_t *sw_cap = &phl_com->bus_sw_cap;
status = rtw_hal_ltr_sw_trigger(hal, state);
if (status == RTW_HAL_STATUS_SUCCESS) {
sw_cap->ltr_cur_state = state;
sw_cap->ltr_last_trigger_time = _os_get_cur_time_us();
state == RTW_PCIE_LTR_SW_ACT ?
sw_cap->ltr_sw_act_tri_cnt++ : sw_cap->ltr_sw_idle_tri_cnt++;
return RTW_PHL_STATUS_SUCCESS;
} else {
return RTW_PHL_STATUS_FAILURE;
}
}
/* Switching sw ctrl will trigger active ltr at the same time
to prevent inconsistent state */
/* usage : echo phl ltr set [enable/disable] */
enum rtw_phl_status
phl_ltr_sw_ctrl(struct rtw_phl_com_t *phl_com, void *hal, bool enable)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
status = phl_ltr_sw_trigger(phl_com, hal, RTW_PCIE_LTR_SW_ACT);
if (status == RTW_PHL_STATUS_SUCCESS) {
phl_com->bus_sw_cap.ltr_sw_ctrl = enable;
return RTW_PHL_STATUS_SUCCESS;
} else {
return RTW_PHL_STATUS_FAILURE;
}
}
/* switch to hw control; only valid when the hw supports hw mode */
/* usage : echo phl ltr set [enable/disable] */
void
phl_ltr_hw_ctrl(struct rtw_phl_com_t *phl_com, bool enable)
{
phl_com->bus_sw_cap.ltr_hw_ctrl = enable;
}
/* For the pm module; this will not trigger active ltr since halmac will take care of it */
void phl_ltr_sw_ctrl_ntfy(struct rtw_phl_com_t *phl_com, bool enable)
{
phl_com->bus_sw_cap.ltr_sw_ctrl = enable;
}
u8 phl_ltr_get_cur_state(struct rtw_phl_com_t *phl_com)
{
return phl_com->bus_sw_cap.ltr_cur_state;
}
u32 phl_ltr_get_last_trigger_time(struct rtw_phl_com_t *phl_com)
{
return phl_com->bus_sw_cap.ltr_last_trigger_time;
}
u32 phl_ltr_get_tri_cnt(struct rtw_phl_com_t *phl_com,
enum rtw_pcie_ltr_state state)
{
struct bus_sw_cap_t *sw_cap = &phl_com->bus_sw_cap;
return state == RTW_PCIE_LTR_SW_ACT ?
sw_cap->ltr_sw_act_tri_cnt : sw_cap->ltr_sw_idle_tri_cnt;
}
#define TP_MBPS 100
void phl_ltr_ctrl_watchdog(struct phl_info_t *phl_info)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_stats *sts = &phl_com->phl_stats;
u32 tx_tp_m = 0, rx_tp_m = 0;
static bool start = false;
/* only valid if it is currently running in hw mode */
if (!rtw_hal_ltr_is_hw_ctrl(phl_com, phl_info->hal))
return;
tx_tp_m = sts->tx_tp_kbits >> 10;
rx_tp_m = sts->rx_tp_kbits >> 10;
/* PHL_INFO("%s tx_tp_m = %u /rx_tp_m = %u \n", __func__, tx_tp_m, rx_tp_m);*/
if ((tx_tp_m > TP_MBPS || rx_tp_m > TP_MBPS) && !start) {
start = true;
rtw_hal_ltr_en_hw_mode(phl_info->hal, false);
rtw_hal_ltr_sw_trigger(phl_info->hal, RTW_PCIE_LTR_SW_ACT);
}
if (start && tx_tp_m < TP_MBPS && rx_tp_m < TP_MBPS) {
start = false;
rtw_hal_ltr_en_hw_mode(phl_info->hal, true);
}
}
#endif /* RTW_WKARD_DYNAMIC_LTR */
#endif /* CONFIG_PCI_HCI */
| 2301_81045437/rtl8852be | phl/phl_dm.c | C | agpl-3.0 | 4,021 |
/******************************************************************************
*
* Copyright(c) 2021 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_DM_H_
#define _PHL_DM_H_
#ifdef CONFIG_PCI_HCI
#ifdef RTW_WKARD_DYNAMIC_LTR
enum rtw_phl_status
phl_ltr_sw_trigger(struct rtw_phl_com_t *phl_com, void *hal,
enum rtw_pcie_ltr_state state);
enum rtw_phl_status
phl_ltr_sw_ctrl(struct rtw_phl_com_t *phl_com, void *hal, bool enable);
void
phl_ltr_hw_ctrl(struct rtw_phl_com_t *phl_com, bool enable);
void phl_ltr_sw_ctrl_ntfy(struct rtw_phl_com_t *phl_com, bool enable);
u8 phl_ltr_get_cur_state(struct rtw_phl_com_t *phl_com);
u32 phl_ltr_get_last_trigger_time(struct rtw_phl_com_t *phl_com);
u32 phl_ltr_get_tri_cnt(struct rtw_phl_com_t *phl_com,
enum rtw_pcie_ltr_state state);
void phl_ltr_ctrl_watchdog(struct phl_info_t *phl_info);
#endif
#endif
#endif /*_PHL_DM_H_*/
| 2301_81045437/rtl8852be | phl/phl_dm.h | C | agpl-3.0 | 1,386 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_ECSA_C_
#include "phl_headers.h"
#ifdef CONFIG_PHL_ECSA
void
_phl_ecsa_dump_param(
struct rtw_phl_ecsa_param *param
)
{
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: Channel %d\n", __FUNCTION__,
param->ch);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: Op class %d\n", __FUNCTION__,
param->op_class);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: Count %d\n", __FUNCTION__,
param->count);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: Mode %d\n", __FUNCTION__,
param->mode);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: Delay time %d\n", __FUNCTION__,
param->delay_start_ms);
PHL_DUMP_CHAN_DEF(&(param->new_chan_def));
}
enum rtw_phl_status
_phl_ecsa_tx_pause(
struct phl_ecsa_ctrl_t *ecsa_ctrl
)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct rtw_phl_com_t *phl_com = ecsa_ctrl->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
struct rtw_wifi_role_t *wifi_role = ecsa_ctrl->role;
/* Pause SW Tx */
rtw_phl_tx_stop(phl_info);
rtw_phl_tx_req_notify(phl_info);
/* Disable hw tx all */
if (rtw_hal_dfs_pause_tx(phl_info->hal, wifi_role->hw_band, true) ==
RTW_HAL_STATUS_SUCCESS) {
status = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "[ECSA] hw tx pause OK\n");
} else {
status = RTW_PHL_STATUS_FAILURE;
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "[ECSA] hw tx pause fail\n");
}
return status;
}
enum rtw_phl_status
_phl_ecsa_tx_resume(
struct phl_ecsa_ctrl_t *ecsa_ctrl
)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct rtw_phl_com_t *phl_com = ecsa_ctrl->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
struct rtw_wifi_role_t *wifi_role = ecsa_ctrl->role;
/* Enable hw tx all */
if (rtw_hal_dfs_pause_tx(phl_info->hal, wifi_role->hw_band, false) ==
RTW_HAL_STATUS_SUCCESS) {
status = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "[ECSA] hw tx unpause OK\n");
} else {
status = RTW_PHL_STATUS_FAILURE;
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "[ECSA] hw tx unpause fail\n");
}
rtw_phl_tx_resume(phl_info);
return status;
}
u32
_phl_ecsa_calculate_next_timer_ap(
struct phl_ecsa_ctrl_t *ecsa_ctrl
)
{
struct rtw_phl_com_t *phl_com = ecsa_ctrl->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
struct rtw_bcn_info_cmn *bcn_cmn = NULL;
u32 tsf_h = 0, tsf_l = 0;
u64 tsf = 0;
u32 beacon_period_us = 0, timeslot_us = 0, next_timeslot_us = 0;
u32 current_time_ms = _os_get_cur_time_ms();
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_get_tsf(phl_info->hal,
ecsa_ctrl->role->hw_port,
&tsf_h,
&tsf_l)) {
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "_phl_ecsa_timer_callback(): Get tsf fail\n");
return 0;
}
tsf = tsf_h;
tsf = tsf << 32;
tsf |= tsf_l;
bcn_cmn = &ecsa_ctrl->role->bcn_cmn;
beacon_period_us = bcn_cmn->bcn_interval * TU;
timeslot_us = (u32)_os_modular64(tsf, beacon_period_us);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: CurTimeMs = %d State = %x timeslot = %d\n",
__FUNCTION__, current_time_ms, ecsa_ctrl->state, timeslot_us);
if(ecsa_ctrl->state == ECSA_STATE_START){
next_timeslot_us = beacon_period_us - timeslot_us + (2 * TU);
}
/* To make sure the first ECSA IE shows in the beacon */
else if(ecsa_ctrl->state == ECSA_STATE_UPDATE_FIRST_BCN_DONE){
next_timeslot_us = (beacon_period_us - timeslot_us -
ECSA_UPDATE_BCN_BEFORE_TBTT_US);
ecsa_ctrl->expected_tbtt_ms = current_time_ms +
(beacon_period_us - timeslot_us)/1000;
}
else if(ecsa_ctrl->state == ECSA_STATE_COUNT_DOWN){
if(ecsa_ctrl->ecsa_param.count == 1){
next_timeslot_us = (beacon_period_us - timeslot_us) +
ECSA_SWITCH_TIME_AFTER_LAST_COUNT_DOWN;
}
else{
next_timeslot_us = (beacon_period_us - timeslot_us) +
(beacon_period_us - ECSA_UPDATE_BCN_BEFORE_TBTT_US);
ecsa_ctrl->expected_tbtt_ms = current_time_ms +
(2 * beacon_period_us - timeslot_us)/1000;
}
}
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: Expected tbtt %d!\n", __FUNCTION__, ecsa_ctrl->expected_tbtt_ms);
return next_timeslot_us/1000;
}
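/*
 * Worked example (illustrative only, not from the original source): with a
 * 100 TU beacon interval (beacon_period_us = 102400) and the TSF currently
 * 30000 us past the last TBTT (timeslot_us = 30000), the ECSA_STATE_START
 * branch above arms the timer for 102400 - 30000 + 2 * TU = 74448 us,
 * i.e. about 74 ms, so the callback fires shortly after the next beacon
 * carrying the first ECSA IE has gone out.
 */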
u32
_phl_ecsa_calculate_next_timer_sta(
struct phl_ecsa_ctrl_t *ecsa_ctrl
)
{
struct rtw_wifi_role_t *wifi_role = ecsa_ctrl->role;
struct rtw_phl_com_t *phl_com = ecsa_ctrl->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
struct rtw_phl_stainfo_t *sta = NULL;
u32 beacon_period_us = 0, next_timeslot = 0;
u32 current_time_ms = 0;
current_time_ms = _os_get_cur_time_ms();
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: CurTimeMs = %d State = %x\n",
__FUNCTION__, current_time_ms, ecsa_ctrl->state);
sta = rtw_phl_get_stainfo_self(phl_info, wifi_role);
if(sta == NULL){
PHL_TRACE(COMP_PHL_ECSA, _PHL_ERR_, "%s: Get sta info fail!\n",
__FUNCTION__);
return 0;
}
beacon_period_us = sta->asoc_cap.bcn_interval * TU;
if(ecsa_ctrl->state == ECSA_STATE_START){
next_timeslot = 0;
}
else if(ecsa_ctrl->state == ECSA_STATE_COUNT_DOWN){
u8 count = ecsa_ctrl->ecsa_param.count;
next_timeslot = (beacon_period_us * count) / 1000; /* ms */
}
else if(ecsa_ctrl->state == ECSA_STATE_SWITCH){
next_timeslot = 1000; /* 1s */
}
return next_timeslot;
}
void
_phl_ecsa_calculate_next_timer(
struct phl_ecsa_ctrl_t *ecsa_ctrl
)
{
struct rtw_phl_com_t *phl_com = ecsa_ctrl->phl_com;
void *d = phlcom_to_drvpriv(phl_com);
u32 next_timeslot = 0; /* ms */
if(IS_ECSA_TYPE_AP(ecsa_ctrl))
next_timeslot = _phl_ecsa_calculate_next_timer_ap(ecsa_ctrl);
if(IS_ECSA_TYPE_STA(ecsa_ctrl))
next_timeslot = _phl_ecsa_calculate_next_timer_sta(ecsa_ctrl);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: Next time slot %d!\n", __FUNCTION__, next_timeslot);
_os_set_timer(d, &ecsa_ctrl->timer, next_timeslot);
}
void _phl_ecsa_state_change_ap(
struct phl_ecsa_ctrl_t *ecsa_ctrl
)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct rtw_phl_com_t *phl_com = ecsa_ctrl->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
void *d = phlcom_to_drvpriv(phl_com);
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_FG_MDL_ECSA);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: CurTimeMs = %d State = %x\n",
__FUNCTION__, _os_get_cur_time_ms(), ecsa_ctrl->state);
/* Protect ECSA state change to prevent timer callback racing */
_os_spinlock(d, &(ecsa_ctrl->lock), _bh, NULL);
if(ecsa_ctrl->state == ECSA_STATE_WAIT_DELAY){
status = rtw_phl_ecsa_cmd_request(phl_info, ecsa_ctrl->role);
if(status != RTW_PHL_STATUS_SUCCESS){
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: ECSA command fail!\n", __FUNCTION__);
ecsa_ctrl->state = ECSA_STATE_NONE;
}
else{
ecsa_ctrl->state = ECSA_STATE_START;
}
}
else if(ecsa_ctrl->state == ECSA_STATE_START){
ecsa_ctrl->state = ECSA_STATE_UPDATE_FIRST_BCN_DONE;
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_UPDATE_FIRST_BCN_DONE);
status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
}
/* To make sure the first ECSA IE shows in the beacon */
else if(ecsa_ctrl->state == ECSA_STATE_UPDATE_FIRST_BCN_DONE){
ecsa_ctrl->state = ECSA_STATE_COUNT_DOWN;
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_COUNT_DOWN);
status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
}
else if(ecsa_ctrl->state == ECSA_STATE_COUNT_DOWN){
if(ecsa_ctrl->ecsa_param.count == 1){
ecsa_ctrl->state = ECSA_STATE_SWITCH;
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_SWITCH_START);
msg.rsvd[0] = (u8*)ecsa_ctrl->role;
status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
}
else{
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_COUNT_DOWN);
status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
}
}
_os_spinunlock(d, &(ecsa_ctrl->lock), _bh, NULL);
}
void _phl_ecsa_state_change_sta(
struct phl_ecsa_ctrl_t *ecsa_ctrl
)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct rtw_phl_com_t *phl_com = ecsa_ctrl->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
void *d = phlcom_to_drvpriv(phl_com);
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_FG_MDL_ECSA);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: CurTimeMs = %d State = %x\n",
__FUNCTION__, _os_get_cur_time_ms(), ecsa_ctrl->state);
/* Protect ECSA state change to prevent timer callback racing */
_os_spinlock(d, &(ecsa_ctrl->lock), _bh, NULL);
if(ecsa_ctrl->state == ECSA_STATE_WAIT_DELAY){
status = rtw_phl_ecsa_cmd_request(phl_info, ecsa_ctrl->role);
if(status != RTW_PHL_STATUS_SUCCESS){
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: ECSA command fail!\n", __FUNCTION__);
ecsa_ctrl->state = ECSA_STATE_NONE;
}
else{
ecsa_ctrl->state = ECSA_STATE_START;
}
}
else if(ecsa_ctrl->state == ECSA_STATE_START){
ecsa_ctrl->state = ECSA_STATE_COUNT_DOWN;
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_COUNT_DOWN);
status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
}
else if(ecsa_ctrl->state == ECSA_STATE_COUNT_DOWN){
ecsa_ctrl->state = ECSA_STATE_SWITCH;
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_SWITCH_START);
msg.rsvd[0] = (u8*)ecsa_ctrl->role;
status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
}
else if(ecsa_ctrl->state == ECSA_STATE_SWITCH){
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_CHECK_TX_RESUME);
status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
}
_os_spinunlock(d, &(ecsa_ctrl->lock), _bh, NULL);
}
void
_phl_ecsa_timer_callback(
void *context
)
{
struct phl_ecsa_ctrl_t *ecsa_ctrl = (struct phl_ecsa_ctrl_t *)context;
if(IS_ECSA_TYPE_AP(ecsa_ctrl))
_phl_ecsa_state_change_ap(ecsa_ctrl);
if(IS_ECSA_TYPE_STA(ecsa_ctrl))
_phl_ecsa_state_change_sta(ecsa_ctrl);
}
void
_phl_ecsa_cmd_abort_hdlr(
void* dispr,
void* priv,
bool abort
)
{
struct phl_ecsa_ctrl_t *ecsa_ctrl = (struct phl_ecsa_ctrl_t *)priv;
struct rtw_wifi_role_t *wifi_role = ecsa_ctrl->role;
struct rtw_phl_com_t *phl_com = wifi_role->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
struct rtw_phl_ecsa_ops *ops = &ecsa_ctrl->ops;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
void *d = phlcom_to_drvpriv(phl_com);
_os_cancel_timer(d, &ecsa_ctrl->timer);
/* ECSA AP abort handle */
if(IS_ECSA_TYPE_AP(ecsa_ctrl) &&
ecsa_ctrl->ecsa_param.flag != 0){
ecsa_ctrl->state = ECSA_STATE_NONE;
CLEAR_STATUS_FLAG(ecsa_ctrl->ecsa_param.flag,
ECSA_PARAM_FLAG_APPEND_BCN);
CLEAR_STATUS_FLAG(ecsa_ctrl->ecsa_param.flag,
ECSA_PARAM_FLAG_APPEND_PROBERSP);
/* Update Bcn */
if(ops->update_beacon)
ops->update_beacon(ops->priv, wifi_role);
}
/* ECSA STA abort handle */
if(IS_ECSA_TYPE_STA(ecsa_ctrl)){
if(ecsa_ctrl->ecsa_param.mode == true)
_phl_ecsa_tx_resume(ecsa_ctrl);
if(ops->ecsa_complete)
ops->ecsa_complete(ops->priv, wifi_role);
}
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_FG_MDL_ECSA);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_DONE);
if(abort)
attr.opt = MSG_OPT_SEND_IN_ABORT;
pstatus = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s:[ECSA] dispr_send_msg failed (0x%X)\n",
__FUNCTION__, pstatus);
}
}
enum phl_mdl_ret_code
_phl_ecsa_cmd_acquired(
void* dispr,
void* priv)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
enum phl_mdl_ret_code ret = MDL_RET_FAIL;
struct phl_ecsa_ctrl_t *ecsa_ctrl = (struct phl_ecsa_ctrl_t *)priv;
struct rtw_wifi_role_t *wifi_role = ecsa_ctrl->role;
struct rtw_phl_com_t *phl_com = wifi_role->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_FG_MDL_ECSA);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ECSA_START);
status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS){
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
goto exit;
}
ret = MDL_RET_SUCCESS;
exit:
return ret;
}
enum phl_mdl_ret_code
_phl_ecsa_cmd_abort(
void* dispr,
void* priv)
{
_phl_ecsa_cmd_abort_hdlr(dispr, priv, true);
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code
_phl_ecsa_cmd_msg_hdlr(
void* dispr,
void* priv,
struct phl_msg* msg)
{
struct phl_ecsa_ctrl_t *ecsa_ctrl = (struct phl_ecsa_ctrl_t *)priv;
struct rtw_wifi_role_t *wifi_role = ecsa_ctrl->role;
struct rtw_phl_com_t *phl_com = wifi_role->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
void *d = phlcom_to_drvpriv(phl_com);
enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_msg nextmsg = {0};
struct phl_msg_attribute attr = {0};
struct rtw_phl_ecsa_ops *ops = &ecsa_ctrl->ops;
u32 current_time_ms = _os_get_cur_time_ms();
struct rtw_bcn_info_cmn *bcn_cmn = &ecsa_ctrl->role->bcn_cmn;
u32 beacon_period_ms = bcn_cmn->bcn_interval * TU / 1000;
u8 countdown_n = 1;
struct rtw_chan_def chdef_to_switch = {0};
if(MSG_MDL_ID_FIELD(msg->msg_id) != PHL_FG_MDL_ECSA) {
return MDL_RET_IGNORE;
}
if(IS_MSG_FAIL(msg->msg_id)) {
_phl_ecsa_cmd_abort_hdlr(dispr, priv, false);
status = phl_disp_eng_free_token(phl_info,
wifi_role->hw_band,
&ecsa_ctrl->req_hdl);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_WARN("%s: Free token fail!\n", __FUNCTION__);
return MDL_RET_SUCCESS;
}
SET_MSG_MDL_ID_FIELD(nextmsg.msg_id, PHL_FG_MDL_ECSA);
switch(MSG_EVT_ID_FIELD(msg->msg_id)){
case MSG_EVT_ECSA_START:
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: MSG_EVT_ECSA_START\n", __FUNCTION__);
if(IS_ECSA_TYPE_AP(ecsa_ctrl)){
SET_STATUS_FLAG(ecsa_ctrl->ecsa_param.flag,
ECSA_PARAM_FLAG_APPEND_BCN);
/* Update Bcn */
if(ops->update_beacon)
ops->update_beacon(ops->priv, wifi_role);
}
if(IS_ECSA_TYPE_STA(ecsa_ctrl) &&
ecsa_ctrl->ecsa_param.mode == true){
_phl_ecsa_tx_pause(ecsa_ctrl);
}
_phl_ecsa_calculate_next_timer(ecsa_ctrl);
break;
case MSG_EVT_ECSA_UPDATE_FIRST_BCN_DONE:
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: MSG_EVT_ECSA_UPDATE_FIRST_BCN_DONE\n", __FUNCTION__);
SET_STATUS_FLAG(ecsa_ctrl->ecsa_param.flag,
ECSA_PARAM_FLAG_APPEND_PROBERSP);
_phl_ecsa_calculate_next_timer(ecsa_ctrl);
break;
case MSG_EVT_ECSA_COUNT_DOWN:
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: MSG_EVT_ECSA_COUNT_DOWN\n", __FUNCTION__);
/* Count down mode of STA ECSA only calculates the switch time */
if(IS_ECSA_TYPE_STA(ecsa_ctrl)){
_phl_ecsa_calculate_next_timer(ecsa_ctrl);
break;
}
/* Count down mode of AP ECSA calculates the beacon update time */
if(ecsa_ctrl->expected_tbtt_ms > current_time_ms){
countdown_n = 1;
}
else{
/*
 * There may be a delay during msg delivery,
 * so calculate the actual countdown value
 */
countdown_n = (u8)((current_time_ms-(ecsa_ctrl->expected_tbtt_ms))%beacon_period_ms+1);
}
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: count down %d\n", __FUNCTION__, countdown_n);
if(ecsa_ctrl->ecsa_param.count > countdown_n){
ecsa_ctrl->ecsa_param.count -= countdown_n;
/* Update Bcn */
if(ops->update_beacon)
ops->update_beacon(ops->priv, wifi_role);
_phl_ecsa_calculate_next_timer(ecsa_ctrl);
}
else{
/*
* If the countdown value is less than 1,
* we have to switch channel immediately
*/
ecsa_ctrl->ecsa_param.count = 0;
ecsa_ctrl->state = ECSA_STATE_SWITCH;
SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_ECSA_SWITCH_START);
nextmsg.rsvd[0] = (u8*)ecsa_ctrl->role;
status = phl_disp_eng_send_msg(phl_info,
&nextmsg,
&attr,
NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: Send msg fail!\n", __FUNCTION__);
}
break;
case MSG_EVT_ECSA_SWITCH_START:
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: MSG_EVT_ECSA_SWITCH_START\n", __FUNCTION__);
/* Update channel info */
if(ops->update_chan_info){
ops->update_chan_info(ops->priv,
wifi_role,
ecsa_ctrl->ecsa_param.new_chan_def);
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: update_chan_info done!\n", __FUNCTION__);
}
else{
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: update_chan_info is NULL!\n", __FUNCTION__);
}
/* AP mode ECSA should update beacon to remove ECSA IE and update the channel info */
if(IS_ECSA_TYPE_AP(ecsa_ctrl)){
CLEAR_STATUS_FLAG(ecsa_ctrl->ecsa_param.flag,
ECSA_PARAM_FLAG_APPEND_BCN);
CLEAR_STATUS_FLAG(ecsa_ctrl->ecsa_param.flag,
ECSA_PARAM_FLAG_APPEND_PROBERSP);
/* Update Bcn */
if(ops->update_beacon)
ops->update_beacon(ops->priv, wifi_role);
}
/*
 * We should use the chandef of the chanctx to switch;
 * the bw may not be the same as the one implied by the ECSA
 * operating class because of SCC mode with a different bandwidth.
 */
if(wifi_role->chanctx != NULL){
_os_mem_cpy(d, &chdef_to_switch, &(wifi_role->chanctx->chan_def),
sizeof(struct rtw_chan_def));
if(wifi_role->chanctx->chan_def.chan !=
ecsa_ctrl->ecsa_param.new_chan_def.chan)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: channel is not same as ECSA parameter!\n",
__FUNCTION__);
}
else{
_os_mem_cpy(d, &chdef_to_switch, &(ecsa_ctrl->ecsa_param.new_chan_def),
sizeof(struct rtw_chan_def));
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: chanctx of role is NULL use ECSA parameter!\n",
__FUNCTION__);
}
/* Switch channel */
phl_set_ch_bw(wifi_role, &chdef_to_switch, true);
SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_ECSA_SWITCH_DONE);
nextmsg.rsvd[0] = (u8*)ecsa_ctrl->role;
status = phl_disp_eng_send_msg(phl_info,
&nextmsg,
&attr,
NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: Send msg fail!\n", __FUNCTION__);
break;
case MSG_EVT_ECSA_SWITCH_DONE:
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: MSG_EVT_ECSA_SWITCH_DONE\n", __FUNCTION__);
if(IS_ECSA_TYPE_STA(ecsa_ctrl) &&
ecsa_ctrl->ecsa_param.mode == true){
SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_ECSA_CHECK_TX_RESUME);
status = phl_disp_eng_send_msg(phl_info, &nextmsg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: Send msg fail!\n", __FUNCTION__);
break;
}
SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_ECSA_DONE);
status = phl_disp_eng_send_msg(phl_info,
&nextmsg,
&attr,
NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: Send msg fail!\n", __FUNCTION__);
break;
case MSG_EVT_ECSA_CHECK_TX_RESUME:
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: MSG_EVT_ECSA_CHECK_TX_RESUME\n", __FUNCTION__);
if(IS_ECSA_TYPE_STA(ecsa_ctrl) &&
ecsa_ctrl->ecsa_param.mode == true){
/*
 * TODO: If the driver supports DFS-slave with radar
 * detection, ECSA should un-pause tx directly
 * and the tx pause should be handled by the DFS-slave.
 */
if(ops->check_tx_resume_allow){
if(!ops->check_tx_resume_allow(ops->priv, wifi_role)){
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: Keep Tx pause...\n", __FUNCTION__);
_phl_ecsa_calculate_next_timer(ecsa_ctrl);
break;
}
}
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: Tx resume!\n", __FUNCTION__);
_phl_ecsa_tx_resume(ecsa_ctrl);
}
SET_MSG_EVT_ID_FIELD(nextmsg.msg_id, MSG_EVT_ECSA_DONE);
status = phl_disp_eng_send_msg(phl_info, &nextmsg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: Send msg fail!\n", __FUNCTION__);
break;
case MSG_EVT_ECSA_DONE:
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_,
"%s: MSG_EVT_ECSA_DONE\n", __FUNCTION__);
ecsa_ctrl->state = ECSA_STATE_NONE;
if(ops->ecsa_complete){
ops->ecsa_complete(ops->priv, wifi_role);
}
else{
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: ecsa_complete is NULL!\n", __FUNCTION__);
}
status = phl_disp_eng_free_token(phl_info,
wifi_role->hw_band,
&ecsa_ctrl->req_hdl);
if(status != RTW_PHL_STATUS_SUCCESS)
PHL_WARN("%s: Free token fail!\n", __FUNCTION__);
break;
default:
break;
}
return ret;
}
enum phl_mdl_ret_code
_phl_ecsa_cmd_set_info(
void* dispr,
void* priv,
struct phl_module_op_info* info)
{
enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
/* PHL_INFO(" %s :: info->op_code=%d \n", __func__, info->op_code); */
return ret;
}
enum phl_mdl_ret_code
_phl_ecsa_cmd_query_info(
void* dispr,
void* priv,
struct phl_module_op_info* info)
{
struct phl_ecsa_ctrl_t *ecsa_ctrl = (struct phl_ecsa_ctrl_t *)priv;
enum phl_mdl_ret_code ret = MDL_RET_IGNORE;
/* PHL_INFO(" %s :: info->op_code=%d \n", __func__, info->op_code); */
switch(info->op_code) {
case FG_REQ_OP_GET_ROLE:
info->outbuf = (u8*)ecsa_ctrl->role;
ret = MDL_RET_SUCCESS;
break;
default:
break;
}
return ret;
}
enum rtw_phl_status
rtw_phl_ecsa_cmd_request(
void *phl,
struct rtw_wifi_role_t *role
)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_ecsa_ctrl_t *ecsa_ctrl =
(struct phl_ecsa_ctrl_t *)phl_info->ecsa_ctrl;
struct phl_cmd_token_req req={0};
if(ecsa_ctrl == NULL)
goto exit;
/* Fill foreground command request */
req.module_id= PHL_FG_MDL_ECSA;
req.priv = ecsa_ctrl;
req.role = role;
req.acquired = _phl_ecsa_cmd_acquired;
req.abort = _phl_ecsa_cmd_abort;
req.msg_hdlr = _phl_ecsa_cmd_msg_hdlr;
req.set_info = _phl_ecsa_cmd_set_info;
req.query_info = _phl_ecsa_cmd_query_info;
status = phl_disp_eng_add_token_req(phl, role->hw_band, &req,
&ecsa_ctrl->req_hdl);
if((status != RTW_PHL_STATUS_SUCCESS) &&
(status != RTW_PHL_STATUS_PENDING))
goto exit;
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
enum rtw_phl_status
rtw_phl_ecsa_start(
void *phl,
struct rtw_wifi_role_t *role,
struct rtw_phl_ecsa_param *param
)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *d = phlcom_to_drvpriv(phl_info->phl_com);
struct phl_ecsa_ctrl_t *ecsa_ctrl =
(struct phl_ecsa_ctrl_t *)phl_info->ecsa_ctrl;
struct rtw_phl_ecsa_param *ecsa_param = &(ecsa_ctrl->ecsa_param);
if(ecsa_ctrl == NULL)
return RTW_PHL_STATUS_FAILURE;
if(ecsa_ctrl->state != ECSA_STATE_NONE){
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_, "%s: ECSA already started!\n",
__FUNCTION__);
return RTW_PHL_STATUS_FAILURE;
}
ecsa_ctrl->role = role;
_os_mem_cpy(d, ecsa_param, param, sizeof(struct rtw_phl_ecsa_param));
_phl_ecsa_dump_param(ecsa_param);
ecsa_ctrl->state = ECSA_STATE_WAIT_DELAY;
PHL_TRACE(COMP_PHL_ECSA, _PHL_INFO_, "%s: ECSA start after %dms !\n",
__FUNCTION__, ecsa_ctrl->ecsa_param.delay_start_ms);
_os_set_timer(d, &ecsa_ctrl->timer, ecsa_ctrl->ecsa_param.delay_start_ms);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
rtw_phl_ecsa_cancel(
void *phl,
struct rtw_wifi_role_t *role
)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *d = phlcom_to_drvpriv(phl_info->phl_com);
struct phl_ecsa_ctrl_t *ecsa_ctrl =
(struct phl_ecsa_ctrl_t *)phl_info->ecsa_ctrl;
if(ecsa_ctrl == NULL){
status = RTW_PHL_STATUS_FAILURE;
goto exit;
}
if(ecsa_ctrl->state == ECSA_STATE_NONE)
goto exit;
_os_cancel_timer(d, &ecsa_ctrl->timer);
_os_spinlock(d, &(ecsa_ctrl->lock), _bh, NULL);
if(ecsa_ctrl->state > ECSA_STATE_WAIT_DELAY){
status = phl_disp_eng_cancel_token_req(phl_info,
role->hw_band,
&ecsa_ctrl->req_hdl);
if(status != RTW_PHL_STATUS_SUCCESS){
PHL_TRACE(COMP_PHL_ECSA, _PHL_WARNING_,
"%s: ECSA cancel req fail!\n", __FUNCTION__);
}
}
else{
ecsa_ctrl->state = ECSA_STATE_NONE;
}
_os_spinunlock(d, &(ecsa_ctrl->lock), _bh, NULL);
exit:
return status;
}
enum rtw_phl_status
rtw_phl_ecsa_get_param(
void *phl,
struct rtw_phl_ecsa_param **param
)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_ecsa_ctrl_t *ecsa_ctrl =
(struct phl_ecsa_ctrl_t *)phl_info->ecsa_ctrl;
if(ecsa_ctrl == NULL)
goto exit;
*param = &ecsa_ctrl->ecsa_param;
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
enum rtw_phl_status
phl_ecsa_ctrl_init(
struct phl_info_t *phl_info
)
{
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_ecsa_ctrl_t *ecsa_ctrl = NULL;
ecsa_ctrl = _os_mem_alloc(drv_priv, sizeof(struct phl_ecsa_ctrl_t));
if (ecsa_ctrl == NULL) {
phl_info->ecsa_ctrl = NULL;
return RTW_PHL_STATUS_FAILURE;
}
phl_info->ecsa_ctrl = ecsa_ctrl;
/* set default value */
ecsa_ctrl->state = ECSA_STATE_NONE;
ecsa_ctrl->phl_com = phl_info->phl_com;
ecsa_ctrl->role = NULL;
ecsa_ctrl->expected_tbtt_ms = 0;
_os_init_timer(drv_priv, &ecsa_ctrl->timer, _phl_ecsa_timer_callback,
ecsa_ctrl, "phl_ecsa_timer");
_os_spinlock_init(drv_priv, &(ecsa_ctrl->lock));
return RTW_PHL_STATUS_SUCCESS;
}
void
phl_ecsa_ctrl_deinit(
struct phl_info_t *phl_info
)
{
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_ecsa_ctrl_t *ecsa_ctrl =
(struct phl_ecsa_ctrl_t *)(phl_info->ecsa_ctrl);
if (ecsa_ctrl == NULL)
return;
_os_spinlock_free(drv_priv, &(ecsa_ctrl->lock));
_os_release_timer(drv_priv, &ecsa_ctrl->timer);
_os_mem_free(drv_priv, ecsa_ctrl, sizeof(struct phl_ecsa_ctrl_t));
phl_info->ecsa_ctrl = NULL;
}
enum rtw_phl_status
rtw_phl_ecsa_init_ops(
void *phl,
struct rtw_phl_ecsa_ops *ops)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_ecsa_ctrl_t *ecsa_ctrl =
(struct phl_ecsa_ctrl_t *)phl_info->ecsa_ctrl;
if(ecsa_ctrl == NULL)
goto exit;
ecsa_ctrl->ops.priv = ops->priv;
ecsa_ctrl->ops.update_beacon = ops->update_beacon;
ecsa_ctrl->ops.update_chan_info = ops->update_chan_info;
ecsa_ctrl->ops.check_ecsa_allow = ops->check_ecsa_allow;
ecsa_ctrl->ops.ecsa_complete = ops->ecsa_complete;
ecsa_ctrl->ops.check_tx_resume_allow = ops->check_tx_resume_allow;
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
void
rtw_phl_ecsa_extend_option_hdlr(
u32 extend_option,
struct rtw_phl_ecsa_param *param
)
{
if ((extend_option & ECSA_EX_OPTION_FORCE_BW20) &&
(param->new_chan_def.bw != CHANNEL_WIDTH_20)) {
/* force 20M mode, set attributes accordingly */
param->new_chan_def.bw = CHANNEL_WIDTH_20;
param->new_chan_def.center_ch = param->new_chan_def.chan;
param->new_chan_def.offset = CHAN_OFFSET_NO_EXT;
param->op_class = rtw_phl_get_operating_class(param->new_chan_def);
}
}
#endif
bool
rtw_phl_ecsa_check_allow(
void *phl,
struct rtw_wifi_role_t *role,
struct rtw_chan_def chan_def,
enum phl_ecsa_start_reason reason,
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
u32 *extend_option,
#endif
u32 *delay_start_ms
)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_ecsa_ctrl_t *ecsa_ctrl =
(struct phl_ecsa_ctrl_t *)phl_info->ecsa_ctrl;
struct rtw_phl_ecsa_ops *ops = &(ecsa_ctrl->ops);
bool ecsa_allow = false;
if(ops->check_ecsa_allow)
ecsa_allow = ops->check_ecsa_allow(ops->priv,
role,
chan_def,
reason,
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
extend_option,
#endif
delay_start_ms);
return ecsa_allow;
}
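/*
 * Illustrative sketch (not part of the driver): one possible core-layer
 * flow tying the exported ECSA calls together. The trigger function name
 * and the parameter values are hypothetical; the call sequence
 * (check allow -> apply extend option -> start) follows the APIs above.
 */
#if 0
static void core_trigger_ecsa_example(void *phl, struct rtw_wifi_role_t *role,
				      struct rtw_chan_def *new_chdef)
{
	struct rtw_phl_ecsa_param param = {0};
	u32 delay_ms = 0;
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
	u32 ext_opt = 0;

	if (!rtw_phl_ecsa_check_allow(phl, role, *new_chdef,
				      ECSA_START_CORE_TRIGGER, &ext_opt,
				      &delay_ms))
		return;
#else
	if (!rtw_phl_ecsa_check_allow(phl, role, *new_chdef,
				      ECSA_START_CORE_TRIGGER, &delay_ms))
		return;
#endif
	param.ecsa_type = ECSA_TYPE_AP;
	param.mode = CHANNEL_SWITCH_MODE_NORMAL;
	param.count = ECSA_DEFAULT_CHANNEL_SWITCH_COUNT;
	param.ch = new_chdef->chan;
	param.delay_start_ms = delay_ms;
	param.new_chan_def = *new_chdef;
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
	rtw_phl_ecsa_extend_option_hdlr(ext_opt, &param);
#endif
	rtw_phl_ecsa_start(phl, role, &param);
}
#endif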
#endif /* CONFIG_PHL_ECSA */
| 2301_81045437/rtl8852be | phl/phl_ecsa.c | C | agpl-3.0 | 29,214 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_ECSA_H_
#define _PHL_ECSA_H_
#define ECSA_UPDATE_BCN_BEFORE_TBTT 25 /* unit: TU */
#define ECSA_UPDATE_BCN_BEFORE_TBTT_US (ECSA_UPDATE_BCN_BEFORE_TBTT * TU)
#define ECSA_DEFAULT_CHANNEL_SWITCH_COUNT 10
#define ECSA_SWITCH_TIME_AFTER_LAST_COUNT_DOWN (10 * TU)
#define IS_ECSA_TYPE_AP(_ecsa_ctrl) (_ecsa_ctrl->ecsa_param.ecsa_type == ECSA_TYPE_AP)
#define IS_ECSA_TYPE_STA(_ecsa_ctrl) (_ecsa_ctrl->ecsa_param.ecsa_type == ECSA_TYPE_STA)
enum phl_ecsa_state{
ECSA_STATE_NONE = 0,
ECSA_STATE_WAIT_DELAY = 1,
ECSA_STATE_START,
ECSA_STATE_UPDATE_FIRST_BCN_DONE,
ECSA_STATE_COUNT_DOWN,
ECSA_STATE_SWITCH,
ECSA_STATE_DONE
};
struct phl_ecsa_ctrl_t{
enum phl_ecsa_state state;
struct rtw_phl_com_t *phl_com;
struct rtw_wifi_role_t *role;
struct rtw_phl_ecsa_param ecsa_param;
struct rtw_phl_ecsa_ops ops;
_os_timer timer;
_os_lock lock;
u32 expected_tbtt_ms;
u32 req_hdl;
};
#ifdef CONFIG_PHL_ECSA
enum rtw_phl_status phl_ecsa_ctrl_init(struct phl_info_t *phl_info);
void phl_ecsa_ctrl_deinit(struct phl_info_t *phl_info);
#else
#define phl_ecsa_ctrl_init(_phl_info) RTW_PHL_STATUS_SUCCESS
#define phl_ecsa_ctrl_deinit(_phl_info)
#endif /* CONFIG_PHL_ECSA */
#endif /*_PHL_ECSA_H_*/
| 2301_81045437/rtl8852be | phl/phl_ecsa.h | C | agpl-3.0 | 1,859 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_ECSA_EXPORT_H_
#define _PHL_ECSA_EXPORT_H_
#define CHANNEL_SWITCH_MODE_NORMAL 0
#define CHANNEL_SWITCH_MODE_STOP_TX 1
enum phl_ecsa_type{
ECSA_TYPE_NONE = 0,
ECSA_TYPE_AP = 1,
ECSA_TYPE_STA = 2
};
enum phl_ecsa_param_flag{
ECSA_PARAM_FLAG_APPEND_BCN = BIT0,
ECSA_PARAM_FLAG_APPEND_PROBERSP = BIT1,
};
enum phl_ecsa_start_reason{
ECSA_START_MCC_24G_TO_24G = 0,
ECSA_START_MCC_5G_TO_5G = 1,
ECSA_START_MCC_24G_TO_5G = 2,
ECSA_START_MCC_5G_TO_24G = 3,
ECSA_START_CORE_TRIGGER = 4,
ECSA_START_UNKNOWN
};
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
enum phl_ecsa_extend_option{
ECSA_EX_OPTION_FORCE_BW20 = BIT0,
};
#endif
struct rtw_phl_ecsa_param{
enum phl_ecsa_type ecsa_type;
u8 flag;
u8 mode; /* CHANNEL_SWITCH_MODE_NORMAL or CHANNEL_SWITCH_MODE_STOP_TX */
u8 op_class;
u8 ch;
u8 count;
u32 delay_start_ms;
struct rtw_chan_def new_chan_def;
};
/*
 * priv: ops private, defined by core layer
 * update_beacon: Notify core to construct and update the beacon
 * update_chan_info: Notify core to update AP mode channel information
 * check_ecsa_allow: Provide the reason and ask core whether ECSA is allowed;
 * core can return a delay time (ms) to postpone the ECSA start
 * ecsa_complete: Notify core to reset CSA-related information
 * check_tx_resume_allow: Ask core whether it may resume tx paused by CSA mode == 1
*/
struct rtw_phl_ecsa_ops{
void *priv;
void (*update_beacon)(void *priv, struct rtw_wifi_role_t *role);
void (*update_chan_info)(void *priv,
struct rtw_wifi_role_t *role,
struct rtw_chan_def chan_def);
bool (*check_ecsa_allow)(void *priv,
struct rtw_wifi_role_t *role,
struct rtw_chan_def chan_def,
enum phl_ecsa_start_reason reason,
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
u32 *extend_option,
#endif
u32 *delay_start_ms);
void (*ecsa_complete)(void *priv, struct rtw_wifi_role_t *role);
bool (*check_tx_resume_allow)(void *priv, struct rtw_wifi_role_t *role);
};
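/*
 * Minimal sketch (not part of the driver): a core-layer registration of
 * the ECSA callbacks above through rtw_phl_ecsa_init_ops(). The core_*
 * function names are hypothetical; the ops fields and the init call match
 * the definitions in this header and in phl_ecsa.c.
 */
#if 0
static void core_ecsa_update_beacon(void *priv, struct rtw_wifi_role_t *role)
{
	/* rebuild the beacon so it carries (or drops) the ECSA IE */
}

static void core_ecsa_complete(void *priv, struct rtw_wifi_role_t *role)
{
	/* clear core-layer CSA bookkeeping once the switch is done */
}

static void core_register_ecsa_ops_example(void *phl, void *core_priv)
{
	struct rtw_phl_ecsa_ops ops = {0};

	ops.priv = core_priv;
	ops.update_beacon = core_ecsa_update_beacon;
	ops.ecsa_complete = core_ecsa_complete;
	/* update_chan_info/check_ecsa_allow/check_tx_resume_allow may stay
	 * NULL; phl_ecsa.c checks each pointer before calling it */
	rtw_phl_ecsa_init_ops(phl, &ops);
}
#endif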
#ifdef CONFIG_PHL_ECSA
enum rtw_phl_status
rtw_phl_ecsa_init_ops(
void *phl,
struct rtw_phl_ecsa_ops *ops
);
enum rtw_phl_status
rtw_phl_ecsa_cmd_request(
void *phl,
struct rtw_wifi_role_t *role
);
enum rtw_phl_status
rtw_phl_ecsa_start(
void *phl,
struct rtw_wifi_role_t *role,
struct rtw_phl_ecsa_param *param
);
enum rtw_phl_status
rtw_phl_ecsa_cancel(
void *phl,
struct rtw_wifi_role_t *role
);
enum rtw_phl_status
rtw_phl_ecsa_get_param(
void *phl,
struct rtw_phl_ecsa_param **param
);
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
void
rtw_phl_ecsa_extend_option_hdlr(
u32 extend_option,
struct rtw_phl_ecsa_param *param
);
#endif
bool
rtw_phl_ecsa_check_allow(
void *phl,
struct rtw_wifi_role_t *role,
struct rtw_chan_def chan_def,
enum phl_ecsa_start_reason reason,
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
u32 *extend_option,
#endif
u32 *delay_start_ms
);
#else
#define rtw_phl_ecsa_init_ops(_phl, _ops) RTW_PHL_STATUS_SUCCESS
#define rtw_phl_ecsa_cmd_request(_phl, _role) RTW_PHL_STATUS_SUCCESS
#define rtw_phl_ecsa_start(_phl, _role, _param) RTW_PHL_STATUS_SUCCESS
#define rtw_phl_ecsa_cancel(_phl, _role) RTW_PHL_STATUS_SUCCESS
#define rtw_phl_ecsa_get_param(_phl, _param) RTW_PHL_STATUS_SUCCESS
#ifdef CONFIG_PHL_ECSA_EXTEND_OPTION
#define rtw_phl_ecsa_extend_option_hdlr(_extend_option, _param)
#define rtw_phl_ecsa_check_allow(_phl, _role, _chan_def, _reason, _extend_option, _delay_start_ms) false
#else
#define rtw_phl_ecsa_check_allow(_phl, _role, _chan_def, _reason, _delay_start_ms) false
#endif
#endif
#endif /*_PHL_ECSA_EXPORT_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_ecsa_export.h
|
C
|
agpl-3.0
| 4,164
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_EXT_TX_PWR_LMT_C_
#include "phl_headers.h"
/**
* The function to update the current TX power limit values to the HW registers
* @phl: see struct phl_info_t
* @hw_band: 0x0: band0, 0x1: band1
*
*/
enum rtw_phl_status
rtw_phl_set_power_lmt(void *phl, u8 hw_band)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
if (rtw_hal_set_power_lmt(phl_info->hal, hw_band) == RTW_HAL_STATUS_SUCCESS)
phl_status = RTW_PHL_STATUS_SUCCESS;
return phl_status;
}
/**
* The function to get the TX power limit value for specific parameters
* @phl: see struct phl_info_t
* @hw_band: 0x0: band0, 0x1: band1
* @rate: data rate
* @bandwidth: bandwidth
* @beamforming: 0: TX w/o BF, 1: TX w/ BF
* @tx_num: tx number, 0: 1TX, 1: 2TX
* @channel: center channel
*
*/
s8 rtw_phl_get_power_limit(void *phl, u8 hw_band,
u16 rate, u8 bandwidth, u8 beamforming, u8 tx_num, u8 channel)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_rf_get_power_limit(phl_info->hal, hw_band, rate,
bandwidth, beamforming, tx_num, channel);
}
/**
* The function to update the user-defined extended tx power limit to halrf
* @phl: see struct phl_info_t
* @hw_band: 0x0: band0, 0x1: band1
* @ext_pwr_lmt_info: table of extended tx power limit values
*
* Note,
* This function enables the extended tx power limit mechanism.
* Once this mechanism is enabled, halrf uses
* min(original tx power limit, extended tx power limit) as the
* final tx power limit value.
*
*/
void
rtw_phl_enable_ext_pwr_lmt(void *phl, u8 hw_band,
struct rtw_phl_ext_pwr_lmt_info *ext_pwr_lmt_info)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_tpu_ext_pwr_lmt_info tpu_ext_pwr_lmt_info = {0};
u8 i;
for (i = 0; i < HAL_MAX_PATH; i++) {
tpu_ext_pwr_lmt_info.ext_pwr_lmt_2_4g[i]
= ext_pwr_lmt_info->ext_pwr_lmt_2_4g[i];
tpu_ext_pwr_lmt_info.ext_pwr_lmt_5g_band1[i]
= ext_pwr_lmt_info->ext_pwr_lmt_5g_band1[i];
tpu_ext_pwr_lmt_info.ext_pwr_lmt_5g_band2[i]
= ext_pwr_lmt_info->ext_pwr_lmt_5g_band2[i];
tpu_ext_pwr_lmt_info.ext_pwr_lmt_5g_band3[i]
= ext_pwr_lmt_info->ext_pwr_lmt_5g_band3[i];
tpu_ext_pwr_lmt_info.ext_pwr_lmt_5g_band4[i]
= ext_pwr_lmt_info->ext_pwr_lmt_5g_band4[i];
}
rtw_hal_enable_ext_pwr_lmt(phl_info->hal, hw_band,
&tpu_ext_pwr_lmt_info);
}
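/*
* Illustrative usage sketch (the "phl" handle and the limit values are
* assumptions; the value unit follows whatever halrf expects for s8 power
* limit entries):
*
*	struct rtw_phl_ext_pwr_lmt_info info = {0};
*	u8 i;
*
*	for (i = 0; i < RTW_PHL_MAX_RF_PATH; i++) {
*		info.ext_pwr_lmt_2_4g[i] = 40;
*		info.ext_pwr_lmt_5g_band1[i] = 48;	// CH36 ~ CH48
*		info.ext_pwr_lmt_5g_band2[i] = 48;	// CH52 ~ CH64
*		info.ext_pwr_lmt_5g_band3[i] = 48;	// CH100 ~ CH144
*		info.ext_pwr_lmt_5g_band4[i] = 48;	// CH149 ~ CH165
*	}
*	rtw_phl_enable_ext_pwr_lmt(phl, 0, &info);	// hw_band 0
*	rtw_phl_set_power_lmt(phl, 0);			// push the new limits to HW
*/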
|
2301_81045437/rtl8852be
|
phl/phl_ext_tx_pwr_lmt.c
|
C
|
agpl-3.0
| 3,026
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_EXT_TX_PWR_LMT_DEF_H_
#define _PHL_EXT_TX_PWR_LMT_DEF_H_
struct rtw_phl_ext_pwr_lmt_info { /* extended tx power limit information */
s8 ext_pwr_lmt_2_4g[RTW_PHL_MAX_RF_PATH];
s8 ext_pwr_lmt_5g_band1[RTW_PHL_MAX_RF_PATH]; /*CH36 ~ CH48*/
s8 ext_pwr_lmt_5g_band2[RTW_PHL_MAX_RF_PATH]; /*CH52 ~ CH64*/
s8 ext_pwr_lmt_5g_band3[RTW_PHL_MAX_RF_PATH]; /*CH100 ~ CH144*/
s8 ext_pwr_lmt_5g_band4[RTW_PHL_MAX_RF_PATH]; /*CH149 ~ CH165*/
};
#endif /*_PHL_EXT_TX_PWR_LMT_DEF_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_ext_tx_pwr_lmt_def.h
|
C
|
agpl-3.0
| 1,140
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* Author: vincent_fann@realtek.com
*
*****************************************************************************/
#include "phl_headers.h"
#ifdef CONFIG_FSM
#define PHL_DEBUG_FSM
#define CLOCK_NUM 10
#define CLOCK_UNIT 10
#define IS_CLK_OFF(clk) (clk->remain < 0) /* Negative value means disabled */
#define IS_CLK_ON(clk) (clk->remain >= 0)
#define IS_CLK_EXP(clk) (clk->remain < (CLOCK_UNIT >> 1)) /* expire */
#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
#define MAX(X, Y) (((X) > (Y)) ? (X) : (Y))
#define pstr(s) (s + _os_strlen((u8 *)s))
#define lstr(s, l) (size_t)(l - _os_strlen((u8 *)s))
#define FSM_INITIAL_STATE 0
#ifdef FSM_DBG_MEM_OVERWRITE
void *fsm_kmalloc(u32 sz)
{
char *ptr;
ptr = kmalloc(sz+4, GFP_KERNEL);
if (ptr == NULL)
return NULL;
memset(ptr+sz, 0xff, 4); /* guard pattern used to detect overwrites */
PHL_INFO("+AA %p %d\n", ptr, sz);
return ptr;
}
void fsm_kfree(void *ptr, u32 sz)
{
u32 ptn = 0xffffffff;
u32 *p = (u32 *)(ptr+sz);
PHL_INFO("-AA %p %d", ptr, sz);
if ((*p&ptn) != ptn) {
PHL_ERR("- %p %d", ptr, sz);
PHL_ERR("OVER WRITE %x\n", ptn);
}
kfree(ptr);
}
#define _os_kmem_alloc(a, b) fsm_kmalloc(b)
#define _os_kmem_free(a, b, c) fsm_kfree(b, c)
#endif
struct fsm_event_ent int_event_tbl[] = {
EV_ENT(FSM_INT_EV_MASK),
EV_ENT(FSM_EV_CANCEL),
EV_ENT(FSM_EV_TIMER_EXPIRE),
EV_ENT(FSM_EV_END),
EV_ENT(FSM_EV_SWITCH_IN),
EV_ENT(FSM_EV_SWITCH_OUT),
EV_ENT(FSM_EV_STATE_IN),
EV_ENT(FSM_EV_STATE_OUT),
/* Global event for announcement */
/* BE CAREFUL of the EVENT ORDER
* please also modify enum FSM_EV_ID{} in phl_fsm.h
*/
EV_ENT(FSM_GB_SCAN_START),
EV_ENT(FSM_GB_SCAN_COMPLETE),
EV_ENT(FSM_EV_MAX)
};
/*
* FSM status
*/
enum FSM_STATUS {
FSM_STATUS_NONE, /* default value */
FSM_STATUS_INITIALIZED, /* insert module ok,
* all mem/queue/timer were allocated
* has a pending thread
* phl_fsm_new_fsm()
* phl_fsm_stop_fsm()
*/
FSM_STATUS_READY, /* interface up, schedule thread, timer.
* Does NOT receive message
* phl_fsm_start_fsm()
*/
FSM_STATUS_ENABLE, /* Normal running; reacts to msg
* Internal use
* fsm_enable()
*/
FSM_STATUS_DISABLE, /* Does NOT react to msg, but is still able to receive msg
* Internal use
* fsm_disable()
*/
};
/* @event: event that will be delivered when time is up
* @priv: private data passed back along with the event
* @pause: stop countdown when set
* @start: start time
* @end: end time
* @remain: remaining time in ms; negative means the clock is disabled
*/
struct fsm_clock {
u16 event;
void *priv;
u8 pause;
u32 start;
u32 end;
int remain; /* ms */
};
struct fsm_queue {
struct list_head q;
_os_lock lock;
};
struct fsm_main {
struct list_head list;
char name[FSM_NAME_LEN];
u8 status;
u8 obj_cnt;
u8 oid_seq; /* starts from 1 */
u8 en_clock_num;
_os_lock clock_lock;
_os_thread thread;
_os_timer fsm_timer; /* unit in ms */
struct fsm_root *root;
struct fsm_queue obj_queue;
struct fsm_queue msg_queue;
struct fsm_queue evt_queue;
_os_sema msg_ready;
bool should_stop;
/* extra custom queue; for fsm private */
struct fsm_queue ext_queue;
struct phl_info_t *phl_info; /* phl_info */
struct rtw_phl_fsm_tb tb;
};
/*
* @event: event id
* @param: additional param of the event
* @param_sz: param size
*/
struct fsm_evt {
struct list_head list;
u16 event; /* event id */
struct fsm_main *fsm;
void *param;
int param_sz;
};
/* @oid: object id
* @state: current state
* @custom_obj: object's private data
*/
struct fsm_obj {
struct list_head list;
u8 oid;
u8 state;
char name[FSM_NAME_LEN];
struct fsm_clock clock[CLOCK_NUM];
struct fsm_main *fsm;
void *custom_obj;
int custom_len; /* custom obj length */
/* Global event use */
struct gbl_param my_gbl_req; /* my announcement to all */
struct fsm_queue gbl_queue; /* all received global events */
u16 gbl_q_len; /* number of received global events */
};
/* Main structure to handle all standalone fsm */
struct fsm_root {
_os_thread thread;
struct list_head list;
struct fsm_queue q_share_thd;
struct fsm_queue q_alone_thd;
struct phl_info_t *phl_info;
u8 gbl_seq;
_os_sema msg_ready;
u32 status; /* refer to enum FSM_ROOT_STATUS_FLAGS */
};
/* Static function prototypes */
static int fsm_handler(struct fsm_main *fsm);
static char *fsm_state_name(struct fsm_main *fsm, u8 state);
static u8 fsm_get_evt_level(struct fsm_main *fsm, u16 event);
static void fsm_status_set(struct fsm_main *fsm, enum FSM_STATUS status)
{
fsm->status = status;
}
static enum FSM_STATUS fsm_status(struct fsm_main *fsm)
{
return fsm->status;
}
/* unit ms */
u32 phl_fsm_time_pass(u32 start)
{
u32 now = _os_get_cur_time_ms();
u32 pass;
if (start <= now)
pass = now - start;
else
pass = 0xffffffff - start + now;
return pass;
}
u32 phl_fsm_time_left(u32 start, u32 end)
{
u32 total, pass;
int left = 0;
pass = phl_fsm_time_pass(start);
if (end >= start)
total = end - start;
else
total = 0xffffffff - start + end;
left = total - pass;
if (left < 0)
left = 0;
return (u32)left;
}
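/*
* Worked example (illustrative): with start = 0xfffffff0 and a current time of
* 0x00000010, phl_fsm_time_pass() takes the wrap-around branch and returns
* 0xffffffff - 0xfffffff0 + 0x00000010 = 0x1f ms, so the elapsed time stays
* sane across a 32-bit tick overflow.
*/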
#if 0
static struct fsm_main *fsm_dequeue_fsm(struct fsm_root *root, u8 fsm_mode)
{
void *d = phl_to_drvpriv(root->phl_info);
struct fsm_main *fsm;
struct fsm_queue *queue = (fsm_mode == FSM_ALONE_THREAD) ?
(&root->q_alone_thd) : (&root->q_share_thd);
if (list_empty(&queue->q))
return NULL;
_os_spinlock(d, &queue->lock, _bh, NULL);
fsm = list_first_entry(&queue->q, struct fsm_main, list);
list_del(&fsm->list);
_os_spinunlock(d, &queue->lock, _bh, NULL);
return fsm;
}
static struct fsm_obj *fsm_dequeue_obj(struct fsm_main *fsm)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_obj *obj;
if (list_empty(&fsm->obj_queue.q))
return NULL;
_os_spinlock(d, &fsm->obj_queue.lock, _bh, NULL);
obj = list_first_entry(&fsm->obj_queue.q, struct fsm_obj, list);
list_del(&obj->list);
_os_spinunlock(d, &fsm->obj_queue.lock, _bh, NULL);
return obj;
}
#endif
static struct fsm_msg *fsm_dequeue_msg(struct fsm_main *fsm)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_msg *msg;
if (list_empty(&fsm->msg_queue.q))
return NULL;
_os_spinlock(d, &fsm->msg_queue.lock, _bh, NULL);
msg = list_first_entry(&fsm->msg_queue.q, struct fsm_msg, list);
list_del(&msg->list);
_os_spinunlock(d, &fsm->msg_queue.lock, _bh, NULL);
return msg;
}
static struct fsm_evt *fsm_dequeue_evt(struct fsm_main *fsm)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_evt *evt;
if (list_empty(&fsm->evt_queue.q))
return NULL;
_os_spinlock(d, &fsm->evt_queue.lock, _bh, NULL);
evt = list_first_entry(&fsm->evt_queue.q, struct fsm_evt, list);
list_del(&evt->list);
_os_spinunlock(d, &fsm->evt_queue.lock, _bh, NULL);
return evt;
}
/* For EXTERNAL application to enqueue message to extra queue (expose)
*
* @fsm: fsm that object belonged to
* @msg: message to be enqueued
* @to_head: enqueue message to the head
*/
int phl_fsm_enqueue_ext(struct fsm_main *fsm, struct fsm_msg *msg, u8 to_head)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_queue *queue = &fsm->ext_queue;
_os_spinlock(d, &queue->lock, _bh, NULL);
if (to_head)
list_add(&msg->list, &queue->q);
else
list_add_tail(&msg->list, &queue->q);
_os_spinunlock(d, &queue->lock, _bh, NULL);
return 0;
}
/* For EXTERNAL application to dequeue message from extra queue (expose)
*
* @fsm: fsm that object belonged to
*/
struct fsm_msg *phl_fsm_dequeue_ext(struct fsm_main *fsm)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_msg *msg;
if (list_empty(&fsm->ext_queue.q))
return NULL;
_os_spinlock(d, &fsm->ext_queue.lock, _bh, NULL);
msg = list_first_entry(&fsm->ext_queue.q, struct fsm_msg, list);
list_del(&msg->list);
_os_spinunlock(d, &fsm->ext_queue.lock, _bh, NULL);
return msg;
}
/* For EXTERNAL application to dequeue message from extra queue (expose)
*
* @fsm: fsm that object belonged to
*/
int phl_fsm_is_ext_queue_empty(struct fsm_main *fsm)
{
return list_empty(&fsm->ext_queue.q);
}
static int fsm_new_oid(struct fsm_main *fsm)
{
u8 oid = fsm->oid_seq++;
if (fsm->oid_seq == 0xFF) {
PHL_WARN("%s: reach MAX object ID 0x%x\n",
fsm->name, oid);
}
return oid;
}
static int fsm_enqueue_list(void *d, struct fsm_main *fsm,
struct fsm_queue *queue, struct list_head *list)
{
_os_spinlock(d, &queue->lock, _bh, NULL);
list_add_tail(list, &queue->q);
_os_spinunlock(d, &queue->lock, _bh, NULL);
return 0;
}
static enum fsm_run_rtn fsm_state_run(struct fsm_obj *obj,
u16 event, void *param)
{
struct fsm_main *fsm = obj->fsm;
/* TODO protect incorrect event */
FSM_EV_MSG(fsm, fsm_get_evt_level(fsm, event),
"%s-%d %-18s %s\n", fsm->name, obj->oid,
fsm_state_name(fsm, obj->state), phl_fsm_evt_name(obj, event));
return fsm->tb.state_tbl[obj->state].fsm_func(obj->custom_obj,
event, param);
}
static void fsm_remove_all_queuing_msg(struct fsm_main *fsm)
{
struct fsm_msg *msg;
struct fsm_evt *evt;
void *d = phl_to_drvpriv(fsm->phl_info);
/* go through msg queue and free everything */
while ((msg = fsm_dequeue_msg(fsm)) != NULL) {
if (msg->param)
_os_kmem_free(d, (void *)msg->param, msg->param_sz);
_os_kmem_free(d, (void *)msg, sizeof(*msg));
}
/* go through event queue and free everything */
while ((evt = fsm_dequeue_evt(fsm)) != NULL) {
if (evt->param)
_os_kmem_free(d, (void *)evt->param, evt->param_sz);
_os_kmem_free(d, (void *)evt, sizeof(*evt));
}
/* go through ext queue and free everything */
while ((msg = phl_fsm_dequeue_ext(fsm)) != NULL) {
if (msg->param)
_os_kmem_free(d, (void *)msg->param, msg->param_sz);
_os_kmem_free(d, (void *)msg, sizeof(*msg));
}
}
static int fsm_cancel_all_running_obj(struct fsm_main *fsm)
{
struct fsm_obj *obj;
phl_list_for_loop(obj, struct fsm_obj, &fsm->obj_queue.q, list) {
phl_fsm_gen_msg(fsm->phl_info, obj, NULL, 0, FSM_EV_CANCEL);
}
return 0;
}
u8 phl_fsm_dbg_level(struct fsm_main *fsm, u8 level)
{
if (fsm->tb.dbg_level >= level)
return fsm->tb.dbg_level;
return 0;
}
u8 phl_fsm_evt_level(struct fsm_main *fsm, u8 level)
{
if (fsm->tb.evt_level >= level)
return fsm->tb.evt_level;
return 0;
}
static u8 fsm_get_evt_level(struct fsm_main *fsm, u16 event)
{
u16 ev;
/* fsm internal event */
if (event & FSM_INT_EV_MASK) {
ev = (u8)(event & ~(FSM_EV_MASK));
return int_event_tbl[ev].evt_level;
}
if (event == FSM_EV_UNKNOWN)
return FSM_DBG_INFO;
if (event > fsm->tb.max_event)
return FSM_DBG_INFO;
/* user event */
return fsm->tb.evt_tbl[event].evt_level;
}
static void fsm_init_queue(void *d, struct fsm_queue *queue)
{
INIT_LIST_HEAD(&queue->q);
_os_spinlock_init(d, &queue->lock);
}
static void fsm_deinit_queue(void *d, struct fsm_queue *queue)
{
_os_spinlock_free(d, &queue->lock);
}
/* For an EXTERNAL obj to check the should-stop status
*
* @fsm: fsm to get the status from
*/
bool phl_fsm_should_stop(struct fsm_main *fsm)
{
return fsm->should_stop;
}
int fsm_thread_share(void *param)
{
struct fsm_main *fsm, *fsm_t;
struct fsm_root *root = (struct fsm_root *)param;
void *d = phl_to_drvpriv(root->phl_info);
while (1) {
_os_sema_down(d, &root->msg_ready);
if (_os_thread_check_stop(d, &(root->thread)))
break;
phl_list_for_loop_safe(fsm, fsm_t,
struct fsm_main, &root->q_share_thd.q, list) {
if (fsm_status(fsm) == FSM_STATUS_ENABLE)
fsm_handler(fsm);
}
}
_os_thread_wait_stop(d, &root->thread);
PHL_INFO("fsm: [root] thread down\n");
return 0;
}
int fsm_thread_alone(void *param)
{
struct fsm_main *fsm = (struct fsm_main *)param;
void *d = phl_to_drvpriv(fsm->phl_info);
while (1) {
_os_sema_down(d, &fsm->msg_ready);
if (_os_thread_check_stop(d, &(fsm->thread)))
break;
if (fsm_status(fsm) == FSM_STATUS_ENABLE)
fsm_handler(fsm);
}
_os_thread_wait_stop(d, &fsm->thread);
FSM_INFO(fsm, "fsm: [%s] thread down\n", fsm->name);
return 0;
}
static struct fsm_obj *fsm_get_obj(struct fsm_main *fsm, u8 oid)
{
struct fsm_obj *obj, *obj_t;
void *d = phl_to_drvpriv(fsm->phl_info);
_os_spinlock(d, &fsm->obj_queue.lock, _bh, NULL);
phl_list_for_loop_safe(obj, obj_t,
struct fsm_obj, &fsm->obj_queue.q, list) {
if (oid == (obj->oid)) {
_os_spinunlock(d, &fsm->obj_queue.lock, _bh, NULL);
return obj;
}
}
_os_spinunlock(d, &fsm->obj_queue.lock, _bh, NULL);
return NULL;
}
struct fsm_msg *phl_fsm_new_msg(struct fsm_obj *obj, u16 event)
{
#ifdef PHL_INCLUDE_FSM
struct fsm_msg *msg = NULL;
void *d = phl_to_drvpriv(obj->fsm->phl_info);
if (fsm_status(obj->fsm) != FSM_STATUS_ENABLE) {
PHL_ERR("%s: is out of service, ignore message %s!\n",
obj->fsm->name, phl_fsm_evt_name(obj, event));
return NULL;
}
msg = (struct fsm_msg *)_os_kmem_alloc(d, sizeof(*msg));
if (msg == NULL)
return NULL;
_os_mem_set(d, msg, 0, sizeof(*msg));
msg->event = event;
if (obj) {
msg->fsm = obj->fsm;
msg->oid = obj->oid;
}
return msg;
#else
PHL_WARN("fsm: %s exclude FSM\n", __func__);
return NULL;
#endif
}
enum rtw_phl_status phl_fsm_sent_msg(struct fsm_obj *obj, struct fsm_msg *msg)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
if (fsm_status(obj->fsm) != FSM_STATUS_ENABLE) {
PHL_ERR("fsm: %s is out of service, ignore message %s!\n",
obj->fsm->name, phl_fsm_evt_name(obj, msg->event));
return RTW_PHL_STATUS_RESOURCE;
}
fsm_enqueue_list(d, obj->fsm, &obj->fsm->msg_queue, &msg->list);
if (obj->fsm->tb.mode == FSM_ALONE_THREAD)
_os_sema_up(d, &obj->fsm->msg_ready);
else
_os_sema_up(d, &obj->fsm->root->msg_ready);
return RTW_PHL_STATUS_SUCCESS;
}
static struct fsm_msg *fsm_new_timer_msg(struct fsm_obj *obj,
u16 event, void *priv)
{
struct fsm_msg *msg = NULL;
void *d = phl_to_drvpriv(obj->fsm->phl_info);
msg = (struct fsm_msg *)_os_kmem_alloc(d, sizeof(*msg));
if (msg == NULL)
return msg;
_os_mem_set(d, msg, 0, sizeof(*msg));
msg->event = event;
msg->oid = obj->oid;
msg->param = priv;
return msg;
}
static int fsm_post_message(struct fsm_obj *obj, u16 event, void *priv)
{
struct fsm_msg *msg;
struct fsm_main *fsm = obj->fsm;
void *d = phl_to_drvpriv(obj->fsm->phl_info);
msg = fsm_new_timer_msg(obj, event, priv);
if (msg == NULL)
return -1;
fsm_enqueue_list(d, fsm, &fsm->msg_queue, &msg->list);
if (obj->fsm->tb.mode == FSM_ALONE_THREAD)
_os_sema_up(d, &fsm->msg_ready);
else
_os_sema_up(d, &fsm->root->msg_ready);
return 0;
}
void fsm_timer_callback(void *context)
{
struct fsm_main *fsm = (struct fsm_main *)context;
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_obj *obj;
struct fsm_clock *clk;
int i;
_os_set_timer(d, &fsm->fsm_timer, CLOCK_UNIT);
if (fsm->en_clock_num == 0)
return;
/* go through the clocks and decrease their timers;
* if a timer has expired, issue its event
*/
phl_list_for_loop(obj, struct fsm_obj, &fsm->obj_queue.q, list) {
_os_spinlock(d, &obj->fsm->clock_lock, _bh, NULL);
for (i = 0; i < CLOCK_NUM; i++) {
clk = &obj->clock[i];
if (IS_CLK_OFF(clk) || clk->pause)
continue;
clk->remain = (int)phl_fsm_time_left(clk->start,
clk->end);
//(clk->remain < 0 ) ? 0 : clk->remain;
/* timer expired */
if (!IS_CLK_EXP(clk))
continue;
#ifdef PHL_DBG_FSM
FSM_DBG(obj->fsm, "%s: expire in %d ms\n",
phl_fsm_evt_name(obj, clk->event),
phl_fsm_time_pass(clk->start));
#endif
clk->end = 0;
clk->remain = -1;
/* send message to obj */
/* check fsm status before posting */
if (fsm_status(fsm) != FSM_STATUS_INITIALIZED &&
fsm_status(fsm) != FSM_STATUS_DISABLE)
fsm_post_message(obj, clk->event, clk->priv);
fsm->en_clock_num--;
}
_os_spinunlock(d, &obj->fsm->clock_lock, _bh, NULL);
}
}
/* allocate and init fsm resource */
struct fsm_main *phl_fsm_init_fsm(struct fsm_root *root, const char *name,
void *priv, struct rtw_phl_fsm_tb *tb)
{
#ifdef PHL_INCLUDE_FSM
struct fsm_main *fsm;
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
void *d = phl_to_drvpriv(phl_info);
//char name_t[FSM_NAME_LEN+10];
/* check event table */
if (tb->evt_tbl[tb->max_event-1].event != tb->max_event-1) {
PHL_ERR("Event mismatch ? Is max event = %d != %d ?\n",
tb->evt_tbl[tb->max_event-1].event,
tb->max_event-1);
return NULL;
}
/* check state table */
if (tb->state_tbl[tb->max_state-1].state != tb->max_state-1) {
PHL_ERR("State mismatch ? Is max state = %d != %d) ?\n",
tb->state_tbl[tb->max_state-1].state,
tb->max_state-1);
return NULL;
}
fsm = (struct fsm_main *)_os_kmem_alloc(d, sizeof(*fsm));
if (fsm == NULL)
return NULL;
_os_mem_set(d, fsm, 0, sizeof(*fsm));
_os_mem_cpy(d, &fsm->tb, (void *)tb, sizeof(*tb));
_os_mem_cpy(d, &fsm->name, (void *)name,
MIN(FSM_NAME_LEN-1, _os_strlen((u8 *)name)));
fsm->root = root;
fsm->phl_info = phl_info;
fsm_init_queue(d, &(fsm->obj_queue));
fsm_init_queue(d, &(fsm->msg_queue));
fsm_init_queue(d, &(fsm->evt_queue));
fsm_init_queue(d, &(fsm->ext_queue));
_os_spinlock_init(d, &fsm->clock_lock);
_os_init_timer(d, &fsm->fsm_timer, fsm_timer_callback, fsm, "fsm");
fsm->oid_seq = 1;
/* link fsm_main to fsm_root */
if (tb->mode == FSM_ALONE_THREAD) {
_os_sema_init(d, &fsm->msg_ready, 0);
fsm_enqueue_list(d, fsm, &root->q_alone_thd, &fsm->list);
} else
fsm_enqueue_list(d, fsm, &root->q_share_thd, &fsm->list);
FSM_INFO(fsm, "fsm: [%s] initialized\n", fsm->name);
fsm_status_set(fsm, FSM_STATUS_INITIALIZED);
return fsm;
#else
PHL_WARN("fsm: %s exclude FSM\n", __func__);
return NULL;
#endif /* PHL_INCLUDE_FSM */
}
/* For EXTERNAL application to deinit fsm (expose)
* @fsm: see struct fsm_main
*/
enum rtw_phl_status phl_fsm_deinit_fsm(struct fsm_main *fsm)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_obj *obj, *obj_t;
_os_release_timer(d, &fsm->fsm_timer);
/* remove fsm from the link list */
list_del(&fsm->list);
phl_list_for_loop_safe(obj, obj_t,
struct fsm_obj, &fsm->obj_queue.q, list) {
list_del(&obj->list);
phl_fsm_flush_gbl(obj);
fsm_deinit_queue(d, &(obj->gbl_queue));
/* free custom_obj */
_os_kmem_free(d, obj->custom_obj, obj->custom_len);
/* free fsm_obj */
_os_kmem_free(d, obj, sizeof(*obj));
}
fsm_deinit_queue(d, &(fsm->obj_queue));
fsm_deinit_queue(d, &(fsm->msg_queue));
fsm_deinit_queue(d, &(fsm->evt_queue));
fsm_deinit_queue(d, &(fsm->ext_queue));
_os_spinlock_free(d, &fsm->clock_lock);
if (fsm->tb.mode == FSM_ALONE_THREAD)
_os_sema_free(d, &fsm->msg_ready);
FSM_INFO(fsm, "fsm: [%s] uninitilized\n", fsm->name);
_os_kmem_free(d, fsm, sizeof(*fsm));
return RTW_PHL_STATUS_SUCCESS;
}
char *phl_fsm_evt_name(struct fsm_obj *obj, u16 event)
{
struct fsm_main *fsm = obj->fsm;
u8 ev;
/* TODO handle global, internal, user event */
/* global event */
if (event & FSM_GBL_EV_MASK)
return "global";
/* fsm internal event */
if (event & FSM_INT_EV_MASK) {
ev = (u8)(event & ~(FSM_EV_MASK));
return int_event_tbl[ev].name;
}
if (event == FSM_EV_UNKNOWN)
return "FSM_EV_UNKNOWN";
if (event > fsm->tb.max_event)
return "undefine";
/* user event */
return fsm->tb.evt_tbl[event].name;
}
static char *fsm_state_name(struct fsm_main *fsm, u8 state)
{
if (state > fsm->tb.max_state)
return "unknown";
return fsm->tb.state_tbl[state].name;
}
/* For EXTERNAL application to get state id (expose)
*
* @obj: obj to get state
*/
u8 phl_fsm_state_id(struct fsm_obj *obj)
{
return obj->state;
}
/** init obj internal variables
*
* @obj: the obj being switched in;
* its state defaults to the 1st state in state_tbl
*/
static void fsm_obj_switch_in(struct fsm_obj *obj)
{
struct fsm_main *fsm = obj->fsm;
//void *d = phl_to_drvpriv(fsm->phl_info);
/* default init to the 1st state in state_tbl */
obj->state = fsm->tb.state_tbl[0].state;
FSM_INFO(fsm, "%s-%d %-18s -> %s\n", fsm->name, obj->oid,
"switch in", fsm_state_name(fsm, obj->state));
/* make it alive! Hello OBJ! */
fsm_state_run(obj, FSM_EV_SWITCH_IN, NULL);
}
/** deinit obj internal variables
*
* @obj: the obj being switched out;
* its state is reset to the 1st state in state_tbl
*/
static void fsm_obj_switch_out(struct fsm_obj *obj)
{
struct fsm_main *fsm = obj->fsm;
//void *d = phl_to_drvpriv(fsm->phl_info);
/* default init to the 1st state in state_tbl */
obj->state = fsm->tb.state_tbl[0].state;
FSM_INFO(fsm, "%s-%d %-18s -> %s\n", fsm->name, obj->oid,
"switch out", fsm_state_name(fsm, obj->state));
/* time to leave. Goodbye OBJ! */
fsm_state_run(obj, FSM_EV_SWITCH_OUT, NULL);
}
/* For EXTERNAL application to create a new fsm object (expose)
*
* @fsm: fsm that the object belongs to
* @fsm_obj: returns the obj param used when calling FSM framework functions
* @sz: custom obj length
*
* return value: NULL :fail
* other :custom obj handle (success)
*/
void *phl_fsm_new_obj(struct fsm_main *fsm,
void **fsm_obj, int sz)
{
#ifdef PHL_INCLUDE_FSM
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_obj *obj;
int i;
obj = (struct fsm_obj *)_os_kmem_alloc(d, sizeof(*obj));
if (obj == NULL)
return NULL;
_os_mem_set(d, obj, 0, sizeof(*obj));
obj->custom_obj = _os_kmem_alloc(d, sz);
if (obj->custom_obj == NULL) {
_os_kmem_free(d, obj, sizeof(*obj));
return NULL;
}
_os_mem_set(d, obj->custom_obj, 0, sz);
for (i = 0; i < CLOCK_NUM; i++)
obj->clock[i].remain = -1; /* Negative means disable */
fsm_init_queue(d, &(obj->gbl_queue));
obj->custom_len = sz;
obj->oid = (u8)fsm_new_oid(fsm);
obj->fsm = fsm;
_os_mem_set(d, obj->name, 0, FSM_NAME_LEN);
_os_snprintf(obj->name, FSM_NAME_LEN,
"%s-%d", obj->fsm->name, obj->oid);
*fsm_obj = obj;
fsm_enqueue_list(d, fsm, &fsm->obj_queue, &obj->list);
return obj->custom_obj;
#else
PHL_WARN("fsm: %s exclude FSM\n", __func__);
return NULL;
#endif /* PHL_INCLUDE_FSM */
}
/* For EXTERNAL application to destroy a fsm object (expose)
*
* @obj: obj param used when calling FSM framework functions
*/
void phl_fsm_destory_obj(struct fsm_obj *obj)
{
struct fsm_main *fsm = obj->fsm;
void *d = phl_to_drvpriv(fsm->phl_info);
list_del(&obj->list);
phl_fsm_flush_gbl(obj);
fsm_deinit_queue(d, &(obj->gbl_queue));
/* free custom_obj */
_os_kmem_free(d, obj->custom_obj, obj->custom_len);
/* free fsm_obj */
_os_kmem_free(d, obj, sizeof(*obj));
}
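/*
* Illustrative lifecycle sketch (the "scan" module, its custom struct and the
* "scan_fsm" handle are assumptions, not code in this file):
*
*	struct scan_obj { struct fsm_obj *fsm_obj; int scan_cnt; };
*	struct fsm_obj *fsm_obj;
*	struct scan_obj *scan;
*
*	scan = phl_fsm_new_obj(scan_fsm, (void **)&fsm_obj, sizeof(*scan));
*	if (scan == NULL)
*		return RTW_PHL_STATUS_RESOURCE;
*	scan->fsm_obj = fsm_obj;	// keep the handle for framework calls
*	...
*	phl_fsm_destory_obj(scan->fsm_obj);	// frees both fsm_obj and scan
*/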
bool phl_fsm_is_alarm_off_ext(struct fsm_obj *obj, u8 id)
{
struct fsm_clock *clock = &obj->clock[id];
return IS_CLK_OFF(clock);
}
bool phl_fsm_is_alarm_off(struct fsm_obj *obj)
{
struct fsm_clock *clock = &obj->clock[0];
return IS_CLK_OFF(clock);
}
static void fsm_set_alarm(struct fsm_obj *obj, int ms,
u16 event, u8 id, void *priv)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
struct fsm_clock *clock = &obj->clock[id];
u32 now;
if (ms == 0)
fsm_post_message(obj, event, priv);
_os_spinlock(d, &obj->fsm->clock_lock, _bh, NULL);
/* turn on clock from off */
if (IS_CLK_OFF(clock))
obj->fsm->en_clock_num++;
now = _os_get_cur_time_ms();
clock->event = event;
clock->priv = priv;
clock->start = now;
clock->end = now + ms;
clock->remain = (int)phl_fsm_time_left(clock->start, clock->end);
_os_spinunlock(d, &obj->fsm->clock_lock, _bh, NULL);
#ifdef PHL_DBG_FSM
FSM_DBG(obj->fsm, "%s:%s now=0x%08x, end=0x%08x, remain=0x%08x\n",
phl_fsm_obj_name(obj), phl_fsm_evt_name(obj, event),
clock->start, clock->end, clock->remain);
#endif
}
/* For EXTERNAL application to extend the alarm time (expose)
*
* @obj: obj param used when calling FSM framework functions
* @ms: time period for the alarm;
* the remaining time will not become less than 'ms'
* @id: alarm id; starts from 1
*/
void phl_fsm_extend_alarm_ext(struct fsm_obj *obj, int ms, u8 id)
{
struct fsm_clock *clk = &obj->clock[id];
int remain = ms;
if (id == 0 || id >= CLOCK_NUM) {
PHL_ERR("%s: %s_%d fail\n",
phl_fsm_obj_name(obj), __func__, id);
return;
}
if (IS_CLK_OFF(clk))
return;
remain = MAX((int)phl_fsm_time_left(clk->start, clk->end), ms);
phl_fsm_set_alarm_ext(obj, remain, clk->event, id, clk->priv);
}
/* For EXTERNAL application to set up an alarm (expose)
*
* @obj: obj param used when calling FSM framework functions
* @ms: time period for the alarm
* @event: the alarm issues this event when the timer expires
*/
void phl_fsm_set_alarm(struct fsm_obj *obj, int ms, u16 event)
{
fsm_set_alarm(obj, ms, event, 0, NULL);
}
/* For EXTERNAL application to set up an alarm_ext (expose)
*
* @obj: obj param used when calling FSM framework functions
* @ms: time period for the alarm
* @event: the alarm issues this event when the timer expires
* @id: alarm id; starts from 1
* @priv: priv from caller
*/
void phl_fsm_set_alarm_ext(struct fsm_obj *obj,
int ms, u16 event, u8 id, void *priv)
{
if (id >= CLOCK_NUM) {
PHL_ERR("%s: set alarm_ext_%d to %d ms fail\n",
phl_fsm_obj_name(obj), id, ms);
return;
}
fsm_set_alarm(obj, ms, event, id, priv);
}
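/*
* Illustrative sketch: a state handler arming a one-shot alarm on state entry
* and handling its expiry (SCAN_EV_TIMEOUT, SCAN_ST_IDLE and scan->fsm_obj are
* assumptions):
*
*	case FSM_EV_STATE_IN:
*		phl_fsm_set_alarm(scan->fsm_obj, 5000, SCAN_EV_TIMEOUT);
*		break;
*	case SCAN_EV_TIMEOUT:
*		phl_fsm_state_goto(scan->fsm_obj, SCAN_ST_IDLE);
*		break;
*	case FSM_EV_STATE_OUT:
*		phl_fsm_cancel_alarm(scan->fsm_obj);	// avoid a stale expiry in the next state
*		break;
*/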
static void fsm_cancel_alarm(struct fsm_obj *obj, u8 id)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
struct fsm_clock *clock = &obj->clock[id];
_os_spinlock(d, &obj->fsm->clock_lock, _bh, NULL);
/* turn off clock from on */
if (IS_CLK_ON(clock))
obj->fsm->en_clock_num--;
//obj->clock[id].counter = -1;
obj->clock[id].end = 0;
obj->clock[id].remain = -1;
obj->clock[id].pause = 0;
_os_spinunlock(d, &obj->fsm->clock_lock, _bh, NULL);
}
/* For EXTERNAL application to cancel alarm (expose)
*
* @obj: obj param when calling FSM framework function
*/
void phl_fsm_cancel_alarm(struct fsm_obj *obj)
{
fsm_cancel_alarm(obj, 0);
}
/* For EXTERNAL application to cancel alarm_ext (expose)
*
* @obj: obj param when calling FSM framework function
* @id: alarm id; start from 1
*/
void phl_fsm_cancel_alarm_ext(struct fsm_obj *obj, u8 id)
{
if (id == 0 || id >= CLOCK_NUM) {
PHL_ERR("%s: cancel alarm_ext_%d fail\n",
phl_fsm_obj_name(obj), id);
return;
}
fsm_cancel_alarm(obj, id);
}
static void fsm_pause_alarm(struct fsm_obj *obj, u8 id)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
_os_spinlock(d, &obj->fsm->clock_lock, _bh, NULL);
obj->clock[id].pause = 1;
_os_spinunlock(d, &obj->fsm->clock_lock, _bh, NULL);
}
/* For EXTERNAL application to pause alarm (expose)
*
* @obj: obj param when calling FSM framework function
*/
void phl_fsm_pause_alarm(struct fsm_obj *obj)
{
fsm_pause_alarm(obj, 0);
}
/* For EXTERNAL application to pause alarm_ext (expose)
*
* @obj: obj param when calling FSM framework function
* @id: alarm id; start from 1
*/
void phl_fsm_pause_alarm_ext(struct fsm_obj *obj, u8 id)
{
if (id == 0 || id >= CLOCK_NUM) {
PHL_ERR("%s: pause alarm_%d fail\n", phl_fsm_obj_name(obj), id);
return;
}
fsm_pause_alarm(obj, id);
}
static void fsm_resume_alarm(struct fsm_obj *obj, u8 id)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
u32 cur = _os_get_cur_time_ms();
/* extend the end time */
_os_spinlock(d, &obj->fsm->clock_lock, _bh, NULL);
obj->clock[id].end = cur + obj->clock[id].remain;
obj->clock[id].pause = 0;
_os_spinunlock(d, &obj->fsm->clock_lock, _bh, NULL);
}
/* For EXTERNAL application to resume alarm (expose)
*
* @obj: obj param when calling FSM framework function
*/
void phl_fsm_resume_alarm(struct fsm_obj *obj)
{
fsm_resume_alarm(obj, 0);
}
/* For EXTERNAL application to resume alarm_ext (expose)
*
* @obj: obj param when calling FSM framework function
* @id: alarm id; start from 1
*/
void phl_fsm_resume_alarm_ext(struct fsm_obj *obj, u8 id)
{
if (id == 0 || id >= CLOCK_NUM) {
PHL_ERR("%s: resume alarm_ext_%d fail\n",
phl_fsm_obj_name(obj), id);
return;
}
fsm_resume_alarm(obj, id);
}
/* For EXTERNAL application to change state (expose)
*
* @obj: obj that changes state
* @new_state: new state
*/
void phl_fsm_state_goto(struct fsm_obj *obj, u8 new_state)
{
struct fsm_main *fsm = NULL;
if (obj->state == new_state)
return;
fsm = obj->fsm;
fsm_state_run(obj, FSM_EV_STATE_OUT, NULL);
FSM_MSG(fsm, FSM_DBG_DBG, "\n");
FSM_MSG(fsm, FSM_DBG_DBG, "%s-%d %-18s -> %s\n", fsm->name, obj->oid,
fsm_state_name(fsm, obj->state),
fsm_state_name(fsm, new_state));
obj->state = new_state; /* new state */
fsm_state_run(obj, FSM_EV_STATE_IN, NULL);
}
static void fsm_user_evt_handler(struct fsm_main *fsm)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_msg *msg;
struct fsm_obj *obj;
int rtn = FSM_FREE_PARAM;
while ((msg = fsm_dequeue_msg(fsm)) != NULL) {
rtn = FSM_FREE_PARAM;
obj = fsm_get_obj(fsm, msg->oid);
if (obj == NULL) {
PHL_WARN("%s-%d: obj not found\n",
fsm->name, msg->oid);
goto obj_not_found;
}
/* DO NOT deliver event when fsm->should_stop is true */
if ((fsm->should_stop == true) &&
(obj->state == FSM_INITIAL_STATE) &&
(msg->event < FSM_INT_EV_MASK)) {
PHL_INFO("%s: should stop skip msg %s\n",
phl_fsm_obj_name(obj),
phl_fsm_evt_name(obj, msg->event));
goto skip_msg;
}
/* run state machine */
rtn = fsm_state_run(obj, msg->event, msg->param);
skip_msg:
obj_not_found:
if ((rtn == FSM_FREE_PARAM) &&
(msg->param_sz > 0) &&
(msg->param != NULL))
_os_kmem_free(d, (void *)msg->param, msg->param_sz);
_os_kmem_free(d, (void *)msg, sizeof(*msg));
}
}
static int fsm_update_status(struct fsm_main *fsm)
{
struct fsm_obj *obj;
phl_list_for_loop(obj, struct fsm_obj, &fsm->obj_queue.q, list) {
if (obj->state != FSM_INITIAL_STATE) {
PHL_INFO("%s: state %s\n",
phl_fsm_obj_name(obj),
fsm_state_name(fsm, obj->state));
return 0;
}
}
/* all objs are at FSM_INITIAL_STATE;
* the fsm module is ready to stop
*/
fsm_status_set(fsm, FSM_STATUS_INITIALIZED);
return 0;
}
static int fsm_handler(struct fsm_main *fsm)
{
/* USER EVENT */
fsm_user_evt_handler(fsm);
if (fsm->should_stop == true)
fsm_update_status(fsm);
return 0;
}
/* For EXTERNAL application to get the fsm name (expose)
* @fsm: fsm to get the name of
*/
char *phl_fsm_fsm_name(struct fsm_main *fsm)
{
return fsm->name;
}
/* For EXTERNAL application to get the obj name (expose)
* @obj: obj to get the name of
* For example: scan-1 (scan obj with object id 1)
*/
char *phl_fsm_obj_name(struct fsm_obj *obj)
{
return obj->name;
}
/* For EXTERNAL application to cancel an obj's ongoing task (expose)
* @obj: obj whose task will be cancelled
*/
enum rtw_phl_status phl_fsm_cancel_obj(struct fsm_obj *obj)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
struct fsm_msg *msg;
int rtn;
/* NEW message to cancel obj task */
msg = phl_fsm_new_msg(obj, FSM_EV_CANCEL);
if (msg == NULL) {
PHL_ERR("%s: alloc msg fail\n", obj->fsm->name);
return RTW_PHL_STATUS_RESOURCE;
}
rtn = phl_fsm_sent_msg(obj, msg);
if (rtn != RTW_PHL_STATUS_SUCCESS)
_os_kmem_free(d, msg, sizeof(*msg));
return rtn;
}
/* For EXTERNAL application to init the FSM framework (expose)
* @priv: see struct phl_info_t
*/
struct fsm_root *phl_fsm_init_root(void *priv)
{
#ifdef PHL_INCLUDE_FSM
struct fsm_root *root;
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
void *d = phl_to_drvpriv(phl_info);
int max, size;
/* check size of internal event table */
max = FSM_EV_MAX & ~(int_event_tbl[0].event);
size = sizeof(int_event_tbl)/sizeof(int_event_tbl[0]);
if (size != max + 1) {
PHL_ERR("fsm: int_event_tbl[%d] != %d size mismatch!!",
size, max);
return NULL;
}
root = (struct fsm_root *)_os_kmem_alloc(d, sizeof(*root));
if (root == NULL)
return NULL;
_os_mem_set(d, root, 0, sizeof(*root));
fsm_init_queue(d, &(root->q_share_thd));
fsm_init_queue(d, &(root->q_alone_thd));
_os_sema_init(d, &root->msg_ready, 0);
root->phl_info = phl_info;
PHL_INFO("fsm: [root] initialized\n");
return root;
#else
PHL_WARN("fsm: %s exclude FSM\n", __func__);
return 0;
#endif /* PHL_INCLUDE_FSM */
}
/* For EXTERNAL application to deinit FSM framework (expose)
* @root: FSM framework handler
*/
void phl_fsm_deinit_root(struct fsm_root *root)
{
#ifdef PHL_INCLUDE_FSM
void *d = phl_to_drvpriv(root->phl_info);
void *c = NULL;
fsm_deinit_queue(d, &(root->q_alone_thd));
fsm_deinit_queue(d, &(root->q_share_thd));
_os_sema_free(d, &root->msg_ready);
/* free fsm_root */
_os_kmem_free(d, root, sizeof(*root));
FSM_INFO(c, "fsm: [root] uninitilized\n");
#else
PHL_WARN("fsm: %s exclude FSM\n", __func__);
#endif /* PHL_INCLUDE_FSM */
}
/* For EXTERNAL application to start fsm root (expose)
* @fsm: see struct fsm_main
*/
enum rtw_phl_status phl_fsm_start_root(struct fsm_root *root)
{
void *d = phl_to_drvpriv(root->phl_info);
#ifdef CONFIG_LINUX_THREAD
root->thread = kthread_create(fsm_thread_share, root,
"fsm_thread_share");
wake_up_process(root->thread);
#else
_os_thread_init(d, &(root->thread), fsm_thread_share, root,
"fsm_thread_share");
_os_thread_schedule(d, &(root->thread));
#endif
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to stop fsm root (expose)
* @fsm: see struct fsm_main
*/
enum rtw_phl_status phl_fsm_stop_root(struct fsm_root *root)
{
void *d = phl_to_drvpriv(root->phl_info);
void *c = NULL;
_os_thread_stop(d, &(root->thread));
_os_sema_up(d, &root->msg_ready);
_os_thread_deinit(d, &(root->thread));
FSM_INFO(c, "fsm: [root] stopped\n");
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to start fsm (expose)
* @fsm: see struct fsm_main
*/
enum rtw_phl_status phl_fsm_start_fsm(struct fsm_main *fsm)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_obj *obj;
phl_list_for_loop(obj, struct fsm_obj, &fsm->obj_queue.q, list) {
fsm_obj_switch_in(obj);
}
if (fsm->tb.mode == FSM_ALONE_THREAD) {
_os_thread_init(d, &(fsm->thread), fsm_thread_alone, fsm,
"fsm_thread_alone");
_os_thread_schedule(d, &(fsm->thread));
}
_os_set_timer(d, &fsm->fsm_timer, CLOCK_UNIT);
fsm->status = FSM_STATUS_READY;
fsm_status_set(fsm, FSM_STATUS_ENABLE);
FSM_INFO(fsm, "fsm: [%s] started\n", fsm->name);
return RTW_PHL_STATUS_SUCCESS;
}
#define WAIT_DUR 10
#define WAIT_TIMES 20
/* For EXTERNAL application to stop fsm (expose)
* @fsm: see struct fsm_main
*/
enum rtw_phl_status phl_fsm_stop_fsm(struct fsm_main *fsm)
{
void *d = phl_to_drvpriv(fsm->phl_info);
struct fsm_obj *obj;
int wait = WAIT_TIMES;
fsm->should_stop = true;
/* CANCEL all objs within fsm */
fsm_cancel_all_running_obj(fsm);
/* wait for the fsm module to finish its task gracefully */
while ((fsm->status != FSM_STATUS_INITIALIZED) && --wait)
_os_sleep_ms(d, WAIT_DUR);
if (wait < (WAIT_TIMES >> 1))
FSM_INFO(fsm, "%s: take %dms to disable\n",
fsm->name, (WAIT_TIMES-wait)*WAIT_DUR);
fsm_status_set(fsm, FSM_STATUS_DISABLE);
_os_spinlock(d, &fsm->clock_lock, _bh, NULL);
_os_cancel_timer(d, &fsm->fsm_timer);
_os_spinunlock(d, &fsm->clock_lock, _bh, NULL);
phl_list_for_loop(obj, struct fsm_obj, &fsm->obj_queue.q, list) {
fsm_obj_switch_out(obj);
phl_fsm_flush_gbl(obj);
}
fsm_remove_all_queuing_msg(fsm);
if (fsm->tb.mode == FSM_ALONE_THREAD) {
_os_thread_stop(d, &(fsm->thread));
_os_sema_up(d, &fsm->msg_ready);
_os_thread_deinit(d, &(fsm->thread));
}
fsm->should_stop = false;
FSM_INFO(fsm, "fsm: [%s] stopped\n", fsm->name);
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to generate message buffer (expose)
* Generate message quickly and simply
* @phl: phl_info_t
* @obj: fsm_obj (msg receiver)
* @pbuf: message parameter
* @sz: message parameter size
* @event: event for the message
*/
enum rtw_phl_status phl_fsm_gen_msg(void *phl, struct fsm_obj *obj,
void *pbuf, u32 sz, u16 event)
{
#ifdef PHL_INCLUDE_FSM
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct fsm_msg *msg;
void *d = phl_to_drvpriv(phl_info);
void *param = NULL;
int rtn = RTW_PHL_STATUS_RESOURCE;
/* NEW mem for message */
msg = phl_fsm_new_msg(obj, event);
if (msg == NULL) {
FSM_ERR(obj->fsm, "%s: alloc msg %s fail\n",
phl_fsm_obj_name(obj),
phl_fsm_evt_name(obj, event));
goto msg_fail;
}
/* NEW mem for param */
if (pbuf && sz) {
param = _os_kmem_alloc(d, sz);
if (param == NULL) {
FSM_ERR(obj->fsm,
"%s: alloc param %s fail\n",
phl_fsm_obj_name(obj),
phl_fsm_evt_name(obj, event));
goto param_fail;
}
_os_mem_cpy(d, param, pbuf, sz);
}
msg->param = (void *)param;
msg->param_sz = sz;
rtn = phl_fsm_sent_msg(obj, msg);
if (rtn != RTW_PHL_STATUS_SUCCESS)
goto send_fail;
return rtn;
send_fail:
if (msg->param && msg->param_sz)
_os_kmem_free(d, msg->param, msg->param_sz);
param_fail:
_os_kmem_free(d, msg, sizeof(*msg));
msg_fail:
return rtn;
#else
PHL_WARN("fsm: %s exclude FSM\n", __func__);
return RTW_PHL_STATUS_FAILURE;
#endif /* PHL_INCLUDE_FSM */
}
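/*
* Illustrative sketch: delivering an event plus a copied parameter to an obj
* (the scan_param struct and SCAN_EV_START are assumptions):
*
*	struct scan_param p = { .ch_num = 11 };
*
*	// pbuf is duplicated into freshly allocated memory, so a stack
*	// variable is fine here
*	if (phl_fsm_gen_msg(phl_info, scan->fsm_obj, &p, sizeof(p),
*			    SCAN_EV_START) != RTW_PHL_STATUS_SUCCESS)
*		PHL_ERR("scan: queue SCAN_EV_START fail\n");
*/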
enum rtw_phl_status phl_fsm_flush_gbl(struct fsm_obj *obj)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
struct gbl_param *p;
_os_mem_set(d, &obj->my_gbl_req, 0, sizeof(obj->my_gbl_req));
/* flush obj->gbl_queue */
phl_list_for_loop(p,
struct gbl_param, &obj->gbl_queue.q, list) {
list_del(&p->list);
FSM_WARN(obj->fsm, "%s: del non replied %s:%s #%d\n",
phl_fsm_obj_name(obj),
phl_fsm_obj_name(p->obj_from),
phl_fsm_evt_name(obj, p->event), p->seq);
_os_kmem_free(d, (void *)p, sizeof(*p));
}
obj->gbl_q_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
/* For an EXTERNAL fsm module to announce a global msg (expose)
*
* !!! ONLY ALLOW fsm MODULES to call !!!
* !!! Otherwise there will be racing issues !!!
*
* A global msg goes through all fsm modules
* Limitation:
* Only ONE global announcement is supported at a time;
* the latest one always overwrites the previous one
*
* reference: phl_fsm_gbl_not_reply_num()
*
* @obj: fsm_obj
* @gbl_evt: global event to be announced
* @cb_evt: callback event issued when things are done
* return: wait time (ms); 0: success, waiting is not necessary
* when wait > 0, the callee will reply to the caller within that many ms
* negative value: fail
*/
int phl_fsm_gbl_msg_announce(struct fsm_obj *obj, u16 gbl_evt, u16 cb_evt)
{
struct fsm_root *root = obj->fsm->root;
void *d = phl_to_drvpriv(root->phl_info);
struct fsm_main *fsm = obj->fsm;
struct fsm_main *fsm_t;
struct fsm_obj *obj_t;
int i;
if (obj->my_gbl_req.count > 0) {
/* Should not happen!!
* Have ongoing announcement
* We are waiting for some GBL event reply
*/
for (i = 0; i < PHL_FSM_MAX_WAIT_OCUNT; i++) {
if (obj->my_gbl_req.wait_list[i])
FSM_WARN(fsm,
"%s: drop not replied %s:%s #%d\n",
phl_fsm_obj_name(obj),
phl_fsm_obj_name(
obj->my_gbl_req.wait_list[i]),
phl_fsm_evt_name(obj,
obj->my_gbl_req.event),
obj->my_gbl_req.seq);
}
}
/* create param for announcement */
_os_mem_set(d, &obj->my_gbl_req, 0, sizeof(obj->my_gbl_req));
obj->my_gbl_req.event = gbl_evt;
obj->my_gbl_req.cb_evt = cb_evt;
obj->my_gbl_req.obj_from = obj;
if (obj->fsm->root->gbl_seq == 0) /* 0 reserved */
obj->fsm->root->gbl_seq = 1;
obj->my_gbl_req.seq = obj->fsm->root->gbl_seq++;
/* GLOBAL EVENT will go through all fsms */
phl_list_for_loop(fsm_t, struct fsm_main, &root->q_share_thd.q, list) {
if (fsm_status(fsm_t) != FSM_STATUS_ENABLE) {
FSM_INFO(fsm_t, "fsm: [%s] disabled, skip %s\n",
phl_fsm_fsm_name(fsm_t),
phl_fsm_evt_name(obj, gbl_evt));
continue;
}
/* go through objs */
phl_list_for_loop(obj_t, struct fsm_obj,
&fsm_t->obj_queue.q, list) {
/* skip myself */
if (obj_t == obj)
continue;
fsm_state_run(obj_t, gbl_evt, &obj->my_gbl_req);
if (obj->my_gbl_req.result < 0) {
FSM_ERR(fsm_t,
"%s: announce %s to %s fail(%d)\n",
phl_fsm_obj_name(obj),
phl_fsm_evt_name(obj_t, gbl_evt),
phl_fsm_obj_name(obj_t),
obj->my_gbl_req.result);
return obj->my_gbl_req.result;
}
}
}
return obj->my_gbl_req.wait_ms;
}
/** For the GBL announcer to get the number of un-replied fsms (expose)
*
* !!! ONLY ALLOW fsm MODULES to call !!!
*
* reference: phl_fsm_gbl_msg_announce()
* @obj: fsm_obj
* @param: see gbl_param
* return: 0 means there is no outstanding request and it is ready to go;
* otherwise not yet ready
*/
int phl_fsm_gbl_not_reply_num(struct fsm_obj *obj, struct gbl_param *param)
{
if (param == NULL)
return obj->my_gbl_req.count;
/* we are not waiting for any reply; the GBL request may have been cancelled earlier */
if (obj->my_gbl_req.obj_from == NULL) {
FSM_WARN(obj->fsm, "%s: doesn't expect reply %s:%s #%d\n",
phl_fsm_obj_name(obj),
phl_fsm_obj_name(param->obj_to),
phl_fsm_evt_name(obj, param->event), param->seq);
return -1;
}
/* Are we looking for receiving event ? */
if (param->event != obj->my_gbl_req.event)
return -2;
if (param->seq != obj->my_gbl_req.seq)
return -3;
FSM_INFO(obj->fsm, "%s: got reply %s:%s #%d\n",
phl_fsm_obj_name(obj),
phl_fsm_obj_name(param->obj_to),
phl_fsm_evt_name(obj, param->event), param->seq);
/* clear the incoming reporter from the waiting list */
param->wait_list[param->count] = NULL;
return --obj->my_gbl_req.count;
}
/** For a Global event receiver to ask the announcer to wait for confirmation (expose)
*
* !!! ONLY ALLOW fsm MODULES to call !!!
*
* Call this function if the Global receiver knows it can't finish the task in time.
* The announcer then waits for a confirmation, sent later via
* phl_fsm_gbl_msg_release(), that the task is finished
* reference : phl_fsm_gbl_msg_release()
*
* @obj: see fsm_obj
* @param: see gbl_param
* @ms: how long (max, in ms) the task triggered by the Global event may take;
* the caller will set an alarm to react if we can't finish the job on time
* return: negative value : fail
* positive value : seq number of this GBL event
*/
int phl_fsm_gbl_msg_hold(struct fsm_obj *obj,
struct gbl_param *param, u32 ms)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
struct gbl_param *p;
if (param->count >= PHL_FSM_MAX_WAIT_OCUNT) {
param->result = -(GBL_ST_WAIT_REACH_MAX);
FSM_ERR(obj->fsm, "%s: hold %s reach max counter %d (%d)",
phl_fsm_obj_name(obj),
phl_fsm_evt_name(obj, param->event), param->count,
param->result);
return param->result;
}
if (obj->gbl_q_len >= PHL_FSM_MAX_WAIT_OCUNT) {
param->result = -(GBL_ST_REPLY_REACH_MAX);
FSM_ERR(obj->fsm, "%s: reply %s reach max counter %d (%d)",
phl_fsm_obj_name(obj),
phl_fsm_evt_name(obj, param->event),
obj->gbl_q_len, param->result);
return param->result;
}
p = (struct gbl_param *)_os_kmem_alloc(d, sizeof(*p));
if (p == NULL) {
param->result = -GBL_ST_ALLOC_MEM_FAIL;
FSM_ERR(obj->fsm, "%s: reply %s, alloc mem fail (%d)",
phl_fsm_obj_name(obj),
phl_fsm_evt_name(obj, param->event),
param->result);
return param->result;
}
/* fill info to inform caller that we need time to process */
param->obj_to = obj;
param->wait_list[param->count] = obj;
param->wait_ms = MAX(param->wait_ms, ms);
param->count++;
/* save param for replying later */
_os_mem_cpy(d, p, (void *)param, sizeof(*param));
fsm_enqueue_list(d, obj->fsm, &obj->gbl_queue, &p->list);
FSM_DBG(obj->fsm, "%s: require %d ms to handle %s:%s #%d\n",
phl_fsm_obj_name(obj), ms,
phl_fsm_obj_name(param->obj_from),
phl_fsm_evt_name(obj, param->event),
param->seq);
return p->seq;
}
/** For a Global event receiver to inform the announcer that the task is done (expose)
*
* !!! ONLY ALLOW fsm MODULES to call !!!
*
* Call this function when the Global receiver finishes the task
* This is an ASYNC confirmation to the Global event announcer;
* the announcer receives the cb_evt of the announcement when this function is called
* reference: phl_fsm_gbl_msg_hold()
*
* @obj: see fsm_obj
* @event: event to be replied
* @seq: sequence number of the held request being replied to
* @result: result to be replied
*/
enum rtw_phl_status phl_fsm_gbl_msg_release(struct fsm_obj *obj,
u16 event, u32 seq, enum gbl_evt_result result)
{
void *d = phl_to_drvpriv(obj->fsm->phl_info);
struct gbl_param *p, *p_t;
/* handle multiple Global event requests
* go through link list to get reply param according to event
*/
phl_list_for_loop_safe(p, p_t,
struct gbl_param, &obj->gbl_queue.q, list) {
if ((event == p->event) && (seq == p->seq)) {
p->result = result;
FSM_INFO(obj->fsm, "%s: reply %s:%s #%d, result %d\n",
phl_fsm_obj_name(obj),
phl_fsm_obj_name(p->obj_from),
phl_fsm_evt_name(obj, event), p->seq, result);
phl_fsm_gen_msg(obj->fsm->phl_info, p->obj_from,
p, sizeof(*p), p->cb_evt);
list_del(&p->list);
_os_kmem_free(d, (void *)p, sizeof(*p));
break;
}
}
return RTW_PHL_STATUS_SUCCESS;
}
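/*
* Illustrative sketch of the receiver side of the hold/release handshake
* (handling FSM_GB_SCAN_START with a 200 ms budget is an assumption):
*
*	case FSM_GB_SCAN_START:
*		// cannot pause immediately; ask the announcer to wait up to 200 ms
*		seq = phl_fsm_gbl_msg_hold(obj, (struct gbl_param *)param, 200);
*		break;
*	...
*	// later, once the pause has really completed:
*	phl_fsm_gbl_msg_release(obj, FSM_GB_SCAN_START, seq, GBL_ST_SUCCESS);
*/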
/** Debug functions
*
*/
#ifdef PHL_DEBUG_FSM
static void fsm_dbg_dump_fsm_queue(struct fsm_queue *fsmq,
char *s, int *sz, bool detail)
{
struct fsm_main *fsm, *fsm_t;
char *ptr = s;
int len = *sz;
phl_list_for_loop_safe(fsm, fsm_t,
struct fsm_main, &fsmq->q, list) {
_os_snprintf(pstr(ptr), lstr(ptr, len), "\t%4s : %s\n", fsm->name,
fsm->tb.mode ? "STANDALONE":"SHARE");
if (fsm->tb.dump_fsm && detail) {
len = lstr(ptr, len);
ptr = pstr(ptr);
fsm->tb.dump_fsm(fsm, ptr, &len);
}
}
*sz = len;
}
static void fsm_dbg_help(struct fsm_main *fsm, char *s, int *sz, bool detail);
static void fsm_dbg_dump_fsm(struct fsm_main *fsm,
char *s, int *sz, bool detail)
{
int len = *sz;
char *ptr = s;
_os_snprintf(pstr(ptr), lstr(ptr, len), "\t%4s : %s\n", fsm->name,
fsm->tb.mode ? "STANDALONE":"SHARE");
if (fsm->tb.dump_fsm && detail) {
len = lstr(ptr, len);
ptr = pstr(ptr);
fsm->tb.dump_fsm(fsm, ptr, &len);
}
}
static void fsm_dbg_dump_state(struct fsm_main *fsm,
char *s, int *sz, bool detail)
{
int i;
int len = *sz;
_os_snprintf(pstr(s), lstr(s, len),
"[%s] state table\n", fsm->name);
for (i = 0; i < fsm->tb.max_state; i++)
_os_snprintf(pstr(s), lstr(s, len), "\t%4d : %s\n",
i, fsm->tb.state_tbl[i].name);
*sz = len;
}
static void fsm_dbg_dump_event(struct fsm_main *fsm,
char *s, int *sz, bool detail)
{
int i, max;
int len = *sz;
/* internal event */
_os_snprintf(pstr(s), lstr(s, len), "[Internal] event table\n");
max = FSM_EV_END & ~(int_event_tbl[0].event); /* FSM_INT_EV_MASK */
for (i = 1; i < max; i++)
_os_snprintf(pstr(s), lstr(s, len), "\t0x%4x : %s\n",
int_event_tbl[i].event, int_event_tbl[i].name);
/* user event */
_os_snprintf(pstr(s), lstr(s, len), "\n[%s] event table max %d\n", fsm->name, fsm->tb.max_event);
for (i = 0; i < fsm->tb.max_event-1; i++)
_os_snprintf(pstr(s), lstr(s, len), "\t0x%4x : %s\n",
fsm->tb.evt_tbl[i].event, fsm->tb.evt_tbl[i].name);
*sz = len;
}
static void fsm_dbg_dump_obj(struct fsm_main *fsm,
char *s, int *sz, bool detail)
{
struct fsm_obj *obj, *obj_t;
int len = *sz;
char *ptr = s;
phl_list_for_loop_safe(obj, obj_t,
struct fsm_obj, &fsm->obj_queue.q, list) {
_os_snprintf(pstr(ptr), lstr(ptr, len), "%s-%d : state %s",
fsm->name, obj->oid, fsm_state_name(fsm, obj->state));
if (fsm->tb.dump_obj && detail) {
len = lstr(ptr, len);
ptr = pstr(ptr);
fsm->tb.dump_obj(obj->custom_obj, ptr, &len);
}
}
*sz = len;
}
static void fsm_dbg_max(struct fsm_main *fsm, char *s, int *sz, bool detail)
{
int len = *sz;
_os_snprintf(pstr(s), lstr(s, len),
"ERR: fsm %s sould not run to here!!\n", __func__);
*sz = len;
}
struct fsm_debug_ent {
char *opt;
void (*func)(struct fsm_main *fsm, char *s, int *sz, bool detail);
char *desc;
};
struct fsm_debug_ent debug_opt[] = {
{"help", fsm_dbg_help, "help message"},
{"fsm", fsm_dbg_dump_fsm, "all fsm name"},
{"st", fsm_dbg_dump_state, "state name"},
{"ev", fsm_dbg_dump_event, "event name"},
{"obj", fsm_dbg_dump_obj, "obj detail"},
{"max", fsm_dbg_max, "max_opt"}
};
static void _fsm_dbg_help(struct fsm_root *root, char *s, int *sz, bool detail)
{
int i, max_opt;
int len = *sz;
char *ptr = s;
_os_snprintf(pstr(ptr), lstr(ptr, len),
"usage:\tfsm d <fsm_name> <option>\n");
_os_snprintf(pstr(ptr), lstr(ptr, len),
"\tfsm p,<obj_name> <priv_dbg_cmd> ....\n");
_os_snprintf(pstr(ptr), lstr(ptr, len),
"\tfsm s,<obj_name> <EVENT>\n");
_os_snprintf(pstr(ptr), lstr(ptr, len),
"\tfsm w,<fsm_name> <dbg_level|ev_level> <0-5(dbg)>\n");
_os_snprintf(pstr(s), lstr(ptr, len), "\nfsm_name:\n");
len = lstr(ptr, len);
ptr = pstr(ptr);
fsm_dbg_dump_fsm_queue(&root->q_share_thd, ptr, &len, detail);
len = lstr(ptr, len);
ptr = pstr(ptr);
fsm_dbg_dump_fsm_queue(&root->q_alone_thd, ptr, &len, detail);
_os_snprintf(pstr(ptr), lstr(ptr, len), "\noption:\n");
max_opt = sizeof(debug_opt)/sizeof(debug_opt[0]);
for (i = 0; i < max_opt-1; i++)
_os_snprintf(pstr(ptr), lstr(ptr, len), "%12s : %s\n",
debug_opt[i].opt, debug_opt[i].desc);
*sz = len;
}
static void fsm_dbg_help(struct fsm_main *fsm, char *s, int *sz, bool detail)
{
_fsm_dbg_help(fsm->root, s, sz, false);
}
struct fsm_main *get_fsm_by_name(struct fsm_root *root, char *name)
{
void *d = phl_to_drvpriv(root->phl_info);
struct fsm_main *fsm, *fsm_t;
u32 len = _os_strlen((u8 *)name);
if (len > FSM_NAME_LEN)
return NULL;
phl_list_for_loop_safe(fsm, fsm_t,
struct fsm_main, &root->q_share_thd.q, list) {
if (_os_strlen((u8 *)fsm->name) == len &&
_os_mem_cmp(d, fsm->name, name, len) == 0)
return fsm;
}
phl_list_for_loop_safe(fsm, fsm_t,
struct fsm_main, &root->q_alone_thd.q, list) {
if (_os_strlen((u8 *)fsm->name) == len &&
_os_mem_cmp(d, fsm->name, name, len) == 0)
return fsm;
}
return NULL;
}
static u16 fsm_get_evt_id(struct fsm_main *fsm, char *event)
{
void *d = phl_to_drvpriv(fsm->phl_info);
int i;
u32 len = _os_strlen((u8 *)event);
/* internal event */
for (i = 0; i < (sizeof(int_event_tbl)/sizeof(int_event_tbl[0])); i++) {
if (_os_strlen((u8 *)int_event_tbl[i].name) == len &&
_os_mem_cmp(d, int_event_tbl[i].name, event, len) == 0)
return int_event_tbl[i].event;
}
/* user event */
for (i = 0; i < fsm->tb.max_event; i++) {
if (_os_strlen((u8 *)fsm->tb.evt_tbl[i].name) == len &&
_os_mem_cmp(d,
fsm->tb.evt_tbl[i].name, event, len) == 0)
return fsm->tb.evt_tbl[i].event;
}
return FSM_EV_UNKNOWN;
}
#endif /* PHL_DEBUG_FSM */
/* For EXTERNAL application to debug fsm (expose)
* @phl_info: phl main struct
* @input: input cmd
* @input_num: num of cmd param
* @output: output buffer
* @out_len: MAX output buffer len
*
* d: dump fsm info
* fsm <d> <fsm_name> <fsm|st|ev|obj>
* p: private cmd to fsm module
* fsm <p> <obj_name> <cmd to fsm module>
* s: send event to fsm
* fsm <s> <obj_name> <ev>
* w: write debug level
* fsm <w> <fsm_name> <dbg_level|evt_level> <0-5>
*/
void phl_fsm_dbg(struct phl_info_t *phl_info, char input[][MAX_ARGV],
u32 input_num, char *output, u32 out_len)
{
#ifdef PHL_DEBUG_FSM
struct phl_info_t *phl = (struct phl_info_t *)phl_info;
void *d = phl_to_drvpriv(phl);
struct fsm_root *root = phl->fsm_root;
struct fsm_main *fsm = NULL;
struct fsm_obj *obj = NULL;
struct fsm_msg *msg;
int i, max_opt, len = out_len;
char fsm_name[FSM_NAME_LEN], opt[FSM_NAME_LEN], cmd[FSM_NAME_LEN];
char c, *ptr, *sp;
u8 obj_id = 0;
u16 ev_id;
ptr = output;
/* fsm <cmd> <fsm_name> <opt> : fsm d cmd ev
* fsm <cmd> <fsm_name> <evt> : fsm s cmd-1 FSM_EV_CANCEL
*/
if (input_num < 4)
goto help;
_os_mem_set(d, cmd, 0, FSM_NAME_LEN);
_os_mem_cpy(d, cmd, input[1],
MIN(_os_strlen((u8 *)input[1]), FSM_NAME_LEN));
_os_mem_set(d, fsm_name, 0, FSM_NAME_LEN);
_os_mem_cpy(d, fsm_name, input[2],
MIN(_os_strlen((u8 *)input[2]), FSM_NAME_LEN));
_os_mem_set(d, opt, 0, FSM_NAME_LEN);
_os_mem_cpy(d, opt, input[3],
MIN(_os_strlen((u8 *)input[3]), FSM_NAME_LEN));
c = (char)*cmd;
/* read obj_id
* if fsm_name is "cmd-1" then obj number is "1"
*/
sp = _os_strchr((const char *)fsm_name, '-');
if (sp != NULL) {
*sp = '\0';
if (_os_sscanf(sp+1, "%hhd", &obj_id) != 1) {
_os_snprintf(pstr(ptr), lstr(ptr, len),
"ERR: fsm[%s] miss obj_id\n", fsm_name);
return;
}
} else
obj_id = 1; /* assume obj-1 */
/* search fsm by name */
fsm = get_fsm_by_name(root, (char *)fsm_name);
if (fsm == NULL) {
_os_snprintf(pstr(ptr), lstr(ptr, len),
"ERR: fsm[%s] not found\n", fsm_name);
return;
}
obj = fsm_get_obj(fsm, obj_id);
if (obj == NULL) {
_os_snprintf(pstr(ptr), lstr(ptr, len),
"ERR: fsm[%s] miss obj_%d\n", fsm_name, obj_id);
return;
}
switch (c) {
case 'd':
/* dump status */
max_opt = sizeof(debug_opt)/sizeof(debug_opt[0]);
for (i = 0; i < max_opt-1; i++) {
if (_os_strlen((u8 *)debug_opt[i].opt) == \
_os_strlen((u8 *)opt) &&
_os_mem_cmp(d, debug_opt[i].opt, opt,
_os_strlen((u8 *)opt)) == 0) {
len = lstr(ptr, len);
ptr = pstr(ptr);
debug_opt[i].func(fsm, ptr, &len, true);
break;
}
}
break;
case 'p':
/* call fsm private debug function */
if ((fsm != NULL) && (obj != NULL) && (fsm->tb.debug != NULL)){
len = lstr(ptr, len);
ptr = pstr(ptr);
fsm->tb.debug(obj->custom_obj, &input[3],
(input_num - 3), ptr, (u32 *)&len);
}
break;
case 's':
/* get event id */
ev_id = fsm_get_evt_id(fsm, (char *)opt);
if (ev_id == FSM_EV_UNKNOWN) {
_os_snprintf(pstr(ptr), lstr(ptr, len),
"\n\nERR: fsm[%s] unknown event %s\n",
fsm_name, opt);
len = lstr(ptr, len);
ptr = pstr(ptr);
fsm_dbg_dump_event(fsm, ptr, &len, false);
break;
}
if (obj != NULL) {
msg = phl_fsm_new_msg(obj, ev_id);
/* send event */
if (phl_fsm_sent_msg(obj, msg) != RTW_PHL_STATUS_SUCCESS)
_os_kmem_free(d, msg, sizeof(*msg));
}
break;
case 'w':
/* write cfg */
/* fsm w,<fsm_name>,<dbg_level|ev_level>,<0-5(dbg)> */
sp = _os_strchr((const char *)opt, ',');
if (sp == NULL)
goto help;
*sp = '\0';
if (_os_sscanf(sp+1, "%d", &i) != 1)
goto help;
if ((i<0) || (i>5))
goto help;
if (!_os_strcmp(opt, "dbg_level")) {
fsm->tb.dbg_level = (u8)i;
_os_snprintf(pstr(ptr), lstr(ptr, len),
"\n%s: set debug level to %d\n",
phl_fsm_fsm_name(fsm), i);
} else if (!_os_strcmp(opt, "evt_level")) {
_os_snprintf(pstr(ptr), lstr(ptr, len),
"\n%s: set event level to %d\n",
phl_fsm_fsm_name(fsm), i);
//fsm->tb.evt_level = (u8)i;
} else
goto help;
break;
default:
goto help;
}
return;
help:
len = lstr(ptr, len);
ptr = pstr(ptr);
_fsm_dbg_help(fsm->root, ptr, &len, false);
#endif /* PHL_DEBUG_FSM */
}
#endif /*CONFIG_FSM*/
|
2301_81045437/rtl8852be
|
phl/phl_fsm.c
|
C
|
agpl-3.0
| 54,436
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_FSM_H__
#define __PHL_FSM_H__
#define PHL_INCLUDE_FSM
/* #define PHL_DEBUG_FSM */
/* #define FSM_DBG_MEM_OVERWRITE */
#ifdef FSM_DBG_MEM_OVERWRITE
void *fsm_kmalloc(u32 sz);
void fsm_kfree(void *ptr, u32 sz);
#endif
#define FSM_NAME_LEN 32
#define CLOCK_UNIT 10 /* ms */
struct fsm_root;
struct fsm_main;
struct fsm_obj;
/* event map
*/
#define FSM_EV_MASK 0xff00
#define FSM_USR_EV_MASK 0x0100
#define FSM_INT_EV_MASK 0x0200
#define FSM_GBL_EV_MASK 0x0400
#define FSM_EV_UNKNOWN 0xffff
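/* Rough sketch of how these masks are meant to partition the event id space
 * (inferred from the defines above; the user-event example is hypothetical):
 *   FSM_INT_EV_MASK - framework events enumerated in FSM_EV_ID below
 *   FSM_USR_EV_MASK - event ids defined by individual FSM services, e.g.
 *                     enum { SCAN_EV_START = FSM_USR_EV_MASK, ... }
 *   FSM_GBL_EV_MASK - reserved for globally announced events
 */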
/* FSM EVENT */
enum FSM_EV_ID {
/* Expose to all FSM service */
FSM_INT_EV_MASK_ = FSM_INT_EV_MASK,
FSM_EV_CANCEL,
FSM_EV_TIMER_EXPIRE,
FSM_EV_END, /* for reference */
FSM_EV_SWITCH_IN,
FSM_EV_SWITCH_OUT,
FSM_EV_STATE_IN,
FSM_EV_STATE_OUT,
/* Global Events for announcement */
/* BE CAREFUL of the EVENT ORDER
* please also modify int_event_tbl[] in phl_fsm.c
*/
FSM_GB_SCAN_START,
FSM_GB_SCAN_COMPLETE,
FSM_EV_MAX
};
enum fsm_mode {
FSM_SHARE_THREAD, /* fsm shares root_fsm thread */
FSM_ALONE_THREAD /* fsm has its own thread */
};
enum fsm_run_rtn {
FSM_FREE_PARAM,
FSM_KEEP_PARAM
};
/* @oid: object id (receiver)
 * @event: event id
 * @param: additional parameter carried with the event
 * @param_sz: parameter size
 */
struct fsm_msg {
_os_list list;
u8 oid; /* receiver */
u16 event; /* event id */
struct fsm_main *fsm;
void *param;
int param_sz;
};
enum fsm_dbg_level {
FSM_DBG_NONE,
FSM_DBG_PRINT,
FSM_DBG_ERR,
FSM_DBG_WARN,
FSM_DBG_INFO, /* dbg_level: dump normal info msg */
FSM_DBG_DBG, /* dbg_level: dump state change info */
FSM_DBG_MAX
};
#define EV_ENT(ev) {ev, #ev, FSM_DBG_INFO}
#define EV_WRN(ev) {ev, #ev, FSM_DBG_WARN}
#define EV_INF(ev) {ev, #ev, FSM_DBG_INFO}
#define EV_DBG(ev) {ev, #ev, FSM_DBG_DBG}
struct fsm_event_ent {
u16 event;
char *name;
u8 evt_level;
};
#define ST_ENT(st, hdl) {st, #st, hdl}
struct fsm_state_ent {
u8 state;
char *name;
int (*fsm_func)(void *priv, u16 event, void *param);
};
/* struct of phl_fsm_init_fsm() */
struct rtw_phl_fsm_tb {
u8 mode; /* 0/1: Share/Standalone thread mode */
u8 dbg_level;
u8 evt_level;
u8 max_state;
u16 max_event;
struct fsm_state_ent *state_tbl;
struct fsm_event_ent *evt_tbl;
/* debug function */
void (*dump_obj)(void *obj, char *p, int *sz); /* optional */
void (*dump_fsm)(void *fsm, char *p, int *sz); /* optional */
void (*debug)(void *custom_obj, char input[][MAX_ARGV],
u32 input_num, char *output, u32 *out_len);
};
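/* A minimal registration sketch (hypothetical "scan" service; state/event
 * names, handlers and table contents are assumptions, not part of this header):
 *
 *   static struct fsm_state_ent scan_state_tbl[] = {
 *       ST_ENT(SCAN_ST_IDLE, scan_idle_st_hdl),
 *       ST_ENT(SCAN_ST_REQ, scan_req_st_hdl)
 *   };
 *   static struct fsm_event_ent scan_evt_tbl[] = {
 *       EV_ENT(SCAN_EV_START),
 *       EV_DBG(SCAN_EV_DONE)
 *   };
 *
 *   struct rtw_phl_fsm_tb tb = {0};
 *   tb.mode = FSM_SHARE_THREAD;
 *   tb.state_tbl = scan_state_tbl;
 *   tb.max_state = sizeof(scan_state_tbl) / sizeof(scan_state_tbl[0]);
 *   tb.evt_tbl = scan_evt_tbl;
 *   tb.max_event = sizeof(scan_evt_tbl) / sizeof(scan_evt_tbl[0]);
 *   fsm = phl_fsm_init_fsm(root, "scan", phl_info, &tb);
 */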
enum gbl_evt_result {
GBL_ST_NOT_FINISH,
GBL_ST_SUCCESS,
GBL_ST_ABORT,
GBL_ST_FAIL,
GBL_ST_WAIT_REACH_MAX,
GBL_ST_REPLY_REACH_MAX,
GBL_ST_ALLOC_MEM_FAIL
};
#define PHL_FSM_MAX_WAIT_OCUNT 16
struct gbl_param {
struct list_head list;
u16 event;
u16 cb_evt;
u16 count;
u32 wait_ms;
u32 seq;
struct fsm_obj *obj_from; /* GBL event original issuer */
struct fsm_obj *obj_to; /* GBL event original receiver */
struct fsm_obj *wait_list[PHL_FSM_MAX_WAIT_OCUNT];
int result;
};
/* GBL event caller use */
int phl_fsm_gbl_msg_announce(struct fsm_obj *obj, u16 gbl_evt, u16 cb_evt);
int phl_fsm_gbl_not_reply_num(struct fsm_obj *obj, struct gbl_param *param);
enum rtw_phl_status phl_fsm_flush_gbl(struct fsm_obj *obj);
/* GBL event callee use */
int phl_fsm_gbl_msg_hold(struct fsm_obj *obj,
struct gbl_param *param, u32 ms);
enum rtw_phl_status phl_fsm_gbl_msg_release(struct fsm_obj *obj,
u16 event, u32 seq, enum gbl_evt_result result);
/* fsm init functions */
struct fsm_root *phl_fsm_init_root(void *phl_info);
void phl_fsm_deinit_root(struct fsm_root *root);
enum rtw_phl_status phl_fsm_start_root(struct fsm_root *root);
enum rtw_phl_status phl_fsm_stop_root(struct fsm_root *root);
struct fsm_main *phl_fsm_init_fsm(struct fsm_root *root,
const char *name, void *phl_info, struct rtw_phl_fsm_tb *tb);
enum rtw_phl_status phl_fsm_deinit_fsm(struct fsm_main *fsm);
enum rtw_phl_status phl_fsm_start_fsm(struct fsm_main *fsm);
enum rtw_phl_status phl_fsm_stop_fsm(struct fsm_main *fsm);
void *phl_fsm_new_obj(struct fsm_main *fsm, void **fsm_obj, int obj_sz);
void phl_fsm_destory_obj(struct fsm_obj *obj);
void phl_fsm_dbg(struct phl_info_t *phl_info, char input[][MAX_ARGV],
u32 input_num, char *output, u32 out_len);
/* fsm operating functions */
struct fsm_msg *phl_fsm_new_msg(struct fsm_obj *obj, u16 event);
enum rtw_phl_status phl_fsm_sent_msg(struct fsm_obj *obj, struct fsm_msg *msg);
enum rtw_phl_status phl_fsm_cancel_obj(struct fsm_obj *obj);
void phl_fsm_state_goto(struct fsm_obj *obj, u8 new_state);
void phl_fsm_set_alarm(struct fsm_obj *obj, int ms, u16 event);
void phl_fsm_set_alarm_ext(struct fsm_obj *obj,
int ms, u16 event, u8 id, void *priv);
void phl_fsm_cancel_alarm(struct fsm_obj *obj);
void phl_fsm_cancel_alarm_ext(struct fsm_obj *obj, u8 id);
void phl_fsm_pause_alarm(struct fsm_obj *obj);
void phl_fsm_pause_alarm_ext(struct fsm_obj *obj, u8 id);
void phl_fsm_resume_alarm(struct fsm_obj *obj);
void phl_fsm_resume_alarm_ext(struct fsm_obj *obj, u8 id);
bool phl_fsm_is_alarm_off(struct fsm_obj *obj);
bool phl_fsm_is_alarm_off_ext(struct fsm_obj *obj, u8 id);
void phl_fsm_extend_alarm_ext(struct fsm_obj *obj, int ms, u8 id);
u8 phl_fsm_dbg_level(struct fsm_main *fsm, u8 level);
u8 phl_fsm_evt_level(struct fsm_main *fsm, u8 level);
enum rtw_phl_status phl_fsm_gen_msg(void *phl, struct fsm_obj *obj,
void *pbuf, u32 sz, u16 event);
/* function to manipulate extra queue */
int phl_fsm_enqueue_ext(struct fsm_main *fsm, struct fsm_msg *msg, u8 to_head);
struct fsm_msg *phl_fsm_dequeue_ext(struct fsm_main *fsm);
int phl_fsm_is_ext_queue_empty(struct fsm_main *fsm);
/* util function */
u8 phl_fsm_state_id(struct fsm_obj *obj);
char *phl_fsm_obj_name(struct fsm_obj *obj);
char *phl_fsm_evt_name(struct fsm_obj *obj, u16 event);
u32 phl_fsm_time_pass(u32 start);
u32 phl_fsm_time_left(u32 start, u32 end);
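/* A minimal usage sketch of the operating functions above (hypothetical
 * SCAN_* ids; "d" is the driver priv handle; error handling trimmed):
 *   struct fsm_msg *msg = phl_fsm_new_msg(obj, SCAN_EV_START);
 *   if (msg && phl_fsm_sent_msg(obj, msg) != RTW_PHL_STATUS_SUCCESS)
 *       _os_kmem_free(d, msg, sizeof(*msg));  (free on send failure,
 *                                              as done in phl_fsm_dbg)
 *   phl_fsm_set_alarm(obj, 100, FSM_EV_TIMER_EXPIRE);
 *   phl_fsm_state_goto(obj, SCAN_ST_REQ);
 */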
#ifndef CONFIG_PHL_WPP
#define FSM_PRINT(fsm, fmt, ...) \
do {\
if (!fsm || phl_fsm_dbg_level(fsm, FSM_DBG_PRINT)) \
PHL_TRACE(COMP_PHL_FSM, _PHL_ALWAYS_, fmt, ##__VA_ARGS__); \
} while (0)
#define FSM_ERR(fsm, fmt, ...) \
do {\
if (!fsm || phl_fsm_dbg_level(fsm, FSM_DBG_ERR)) \
PHL_TRACE(COMP_PHL_FSM, _PHL_ERR_, fmt, ##__VA_ARGS__); \
} while (0)
#define FSM_WARN(fsm, fmt, ...) \
do {\
if (!fsm || phl_fsm_dbg_level(fsm, FSM_DBG_WARN)) \
PHL_TRACE(COMP_PHL_FSM, _PHL_WARNING_, fmt, ##__VA_ARGS__); \
} while (0)
#define FSM_INFO(fsm, fmt, ...) \
do {\
if (!fsm || phl_fsm_dbg_level(fsm, FSM_DBG_INFO)) \
PHL_TRACE(COMP_PHL_FSM, _PHL_INFO_, fmt, ##__VA_ARGS__); \
} while (0)
#define FSM_DBG(fsm, fmt, ...) \
do {\
if (!fsm || phl_fsm_dbg_level(fsm, FSM_DBG_DBG)) \
PHL_TRACE(COMP_PHL_FSM, _PHL_DEBUG_, fmt, ##__VA_ARGS__); \
} while (0)
#define FSM_MSG(fsm, level_, fmt, ...) \
do {\
if (!fsm || phl_fsm_dbg_level(fsm, level_)) \
PHL_TRACE(COMP_PHL_FSM, _PHL_INFO_, fmt, ##__VA_ARGS__); \
} while (0)
#define FSM_EV_MSG(fsm, level_, fmt, ...) \
do {\
if (!fsm || phl_fsm_evt_level(fsm, level_)) \
PHL_TRACE(COMP_PHL_FSM, _PHL_INFO_, fmt, ##__VA_ARGS__); \
} while (0)
#else
#undef FSM_PRINT
#define FSM_PRINT(fsm, fmt, ...)
#undef FSM_ERR
#define FSM_ERR(fsm, fmt, ...)
#undef FSM_WARN
#define FSM_WARN(fsm, fmt, ...)
#undef FSM_INFO
#define FSM_INFO(fsm, fmt, ...)
#undef FSM_DBG
#define FSM_DBG(fsm, fmt, ...)
#undef FSM_MSG
#define FSM_MSG(fsm, level, fmt, ...)
#undef FSM_EV_MSG
#define FSM_EV_MSG(fsm, level, fmt, ...)
#endif /* CONFIG_PHL_WPP */
#endif /* __PHL_FSM_H__ */
|
2301_81045437/rtl8852be
|
phl/phl_fsm.h
|
C
|
agpl-3.0
| 8,147
|
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#ifndef __PHL_GIT_INFO_H__
#define __PHL_GIT_INFO_H__
/*@--------------------------[Define] ---------------------------------------*/
#define RTK_CORE_SHA1 "ac110bf56bb47665682a63f42c82142de5a56b44"
#define RTK_PHL_SHA1 "ce9074107430c1b249d17ea5360d94d3a33d2e94"
#define RTK_HALMAC_SHA1 "025e21c09189dd99a204f42ff51d785dbde9307e"
#define RTK_HALBB_SHA1 "641c07e31234f84d568b1d24c27c1195e4ee2518"
#define RTK_HALRF_SHA1 "14b7b01a671288bd0327b422b654e3aaf04f9166"
#define RTK_BTC_SHA1 "69da9b7887f52a3e85590488a74cb487c583e122"
#define RTK_CORE_TAGINFO "v1.15.6.0.2-0-gac110bf5"
#define RTK_PHL_TAGINFO "PHL_1_15_28_0100-5-gce9074107"
#define RTK_HALMAC_TAGINFO "HALMAC_0_25_34_0-0-g025e21c09"
#define RTK_HALBB_TAGINFO "HALBB_025_019_03-0-g641c07e"
#define RTK_HALRF_TAGINFO "HALRF_025_00_024-0-g14b7b01"
#define RTK_BTC_TAGINFO "HALBTC_025_017-0-g69da9b7"
#endif /* __PHL_GIT_INFO_H__ */
|
2301_81045437/rtl8852be
|
phl/phl_git_info.h
|
C
|
agpl-3.0
| 1,884
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_HEADERS_H_
#define _PHL_HEADERS_H_
/*
* Basic components
*/
#include "rtw_general_def.h"
#include "phl_list.h"
#include "phl_status.h"
#include "pltfm_ops.h"
#include "phl_config.h"
#include "hal_g6/hal_config.h"
#include "phl_types.h"
#include "phl_util.h"
#include "phl_def.h"
#include "phl_debug.h"
/*
* PHL Feature headers start
*/
#include "phl_regulation_def.h"
#include "phl_chnlplan.h"
#include "phl_country.h"
#include "phl_scan_instance.h"
#include "phl_regulation.h"
#include "phl_trx_def.h"
#include "phl_wow_def.h"
#include "phl_btc_def.h"
#include "phl_p2pps_def.h"
#include "phl_cmd_dispatch.h"
#include "phl_wow.h"
#include "phl_watchdog.h"
#include "phl_struct.h"
#include "phl_test_def.h"
#include "phl_test.h"
#include "phl_msg_hub.h"
#include "phl_chan.h"
#include "phl_role.h"
#include "custom/phl_custom_def.h"
#include "custom/phl_custom_api.h"
#include "custom/phl_custom.h"
#include "phl_sw_cap.h"
#include "phl_sta.h"
#include "phl_mcc_def.h"
#include "phl_mr.h"
#include "test/trx_test.h"
#include "test/cmd_disp_test.h"
#include "test/phl_ps_dbg_cmd.h"
#include "test/phl_ser_dbg_cmd.h"
#include "phl_tx.h"
#include "phl_rx_agg.h"
#include "phl_rx.h"
#include "phl_ser_def.h"
#include "phl_sound.h"
#include "phl_btc.h"
#include "phl_ps.h"
#ifdef CONFIG_FSM
#include "phl_fsm.h"
#include "phl_cmd_fsm.h"
#include "phl_cmd_job.h"
#include "phl_ser_fsm.h"
#include "phl_scan_fsm.h"
#include "phl_btc_fsm.h"
#include "phl_sound_fsm.h"
#endif /*CONFIG_FSM*/
#include "phl_cmd_ps.h"
#include "phl_sound_cmd.h"
#include "phl_cmd_ser.h"
#include "phl_pkt_ofld.h"
#include "test/phl_dbg_cmd.h"
#include "phl_chan.h"
#include "phl_acs.h"
#include "phl_led_def.h"
#include "phl_led.h"
#include "phl_trx_mit.h"
#include "phl_dm.h"
#include "phl_notify.h"
#include "phl_cmd_general.h"
#include "phl_p2pps.h"
#include "phl_cmd_btc.h"
#include "phl_twt.h"
#include "phl_ecsa_export.h"
#include "phl_ecsa.h"
#include "phl_thermal.h"
#include "phl_txpwr.h"
#include "phl_ext_tx_pwr_lmt_def.h"
#ifdef CONFIG_PHL_CHANNEL_INFO
#include "phl_chan_info.h"
#endif /* CONFIG_PHL_CHANNEL_INFO */
#ifdef CONFIG_PCI_HCI
#include "hci/phl_trx_def_pcie.h"
#include "hci/phl_trx_pcie.h"
#endif
#ifdef CONFIG_USB_HCI
#include "hci/phl_trx_def_usb.h"
#include "hci/phl_trx_usb.h"
#endif
#ifdef CONFIG_SDIO_HCI
#include "hci/phl_trx_def_sdio.h"
#include "hci/phl_trx_sdio.h"
#endif
/******************************************************************************
* Driver layer shall pass wifi configuration flag to PHL for feature category
* comment it temporarily.
*****************************************************************************/
#ifdef CONFIG_WIFI_5
#endif
#ifdef CONFIG_WIFI_6
#include "hal_g6/hal_general_def.h"
#include "hal_g6/hal_def.h"
#include "hal_g6/hal_api.h"
#else
/*temporarily*/
#include "hal_g6/hal_general_def.h"
#include "hal_g6/hal_def.h"
#include "hal_g6/hal_api.h"
#endif
#ifdef CONFIG_WIFI_7
#endif
#endif /*_PHL_HEADERS_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_headers.h
|
C
|
agpl-3.0
| 3,640
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_HEADERS_CORE_H_
#define _PHL_HEADERS_CORE_H_
#ifndef PHL_PLATFORM_LINUX
/* Common definition from PHL */
#include "rtw_general_def.h"
#include "phl_config.h"
#include "phl_list.h"
#include "phl_status.h"
#include "phl_types.h"
#include "pltfm_ops.h"
#else
#include "phl_types.h"
#include "phl_config.h"
#include "phl_status.h"
#endif
/* Exported structure/definition from PHL */
#include "phl_util.h"
#include "phl_regulation_def.h"
#include "phl_chnlplan.h"
#include "phl_country.h"
#include "phl_scan_instance.h"
#include "phl_def.h"
#include "phl_trx_def.h"
#include "phl_wow_def.h"
#include "phl_btc_def.h"
#include "phl_test_def.h"
#include "test/trx_test.h"
#include "test/cmd_disp_test.h"
#include "phl_led_def.h"
#include "custom/phl_custom_def.h"
#include "phl_ext_tx_pwr_lmt_def.h"
#include "phl_chnlplan.h"
/* Exported APIs from PHL */
#include "phl_api.h"
#include "phl_scan.h"
#include "phl_btc.h"
#include "phl_cmd_job.h"
#include "phl_connect.h"
#include "phl_ecsa_export.h"
#include "custom/phl_custom_api.h"
#endif /*_PHL_HEADERS_CORE_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_headers_core.h
|
C
|
agpl-3.0
| 1,724
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_INIT_C_
#include "phl_headers.h"
void _phl_com_init_rssi_stat(struct rtw_phl_com_t *phl_com)
{
u8 i = 0, j = 0;
for (i = 0; i < RTW_RSSI_TYPE_MAX; i++) {
phl_com->rssi_stat.ma_rssi_ele_idx[i] = 0;
phl_com->rssi_stat.ma_rssi_ele_cnt[i] = 0;
phl_com->rssi_stat.ma_rssi_ele_sum[i] = 0;
phl_com->rssi_stat.ma_rssi[i] = 0;
for (j = 0; j < PHL_RSSI_MAVG_NUM; j++)
phl_com->rssi_stat.ma_rssi_ele[i][j] = 0;
}
_os_spinlock_init(phl_com->drv_priv, &(phl_com->rssi_stat.lock));
}
void _phl_com_deinit_rssi_stat(struct rtw_phl_com_t *phl_com)
{
_os_spinlock_free(phl_com->drv_priv, &(phl_com->rssi_stat.lock));
}
/**
* rtw_phl_init_ppdu_sts_para(...)
* Description:
* 1. Do not call this api after rx has started.
* 2. Configures the PPDU status per-packet settings.
**/
void rtw_phl_init_ppdu_sts_para(struct rtw_phl_com_t *phl_com,
bool en_psts_per_pkt, bool psts_ampdu,
u8 rx_fltr)
{
#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
phl_com->ppdu_sts_info.en_psts_per_pkt = en_psts_per_pkt;
phl_com->ppdu_sts_info.psts_ampdu = psts_ampdu;
#ifdef RTW_WKARD_DISABLE_PSTS_PER_PKT_DATA
/* Force-disable PSTS for DATA frames to avoid an unknown performance issue */
rx_fltr &= (~RTW_PHL_PSTS_FLTR_DATA);
#endif
phl_com->ppdu_sts_info.ppdu_sts_filter = rx_fltr;
#else
return;
#endif
}
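/* Illustrative call site (argument values are assumptions): a core driver
 * could call this once at init time, before rx is started, e.g.
 *   rtw_phl_init_ppdu_sts_para(phl_com, true, false,
 *           RTW_PHL_PSTS_FLTR_MGNT | RTW_PHL_PSTS_FLTR_CTRL);
 */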
void _phl_com_deinit_ppdu_sts(struct rtw_phl_com_t *phl_com)
{
#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
u8 i = 0;
u8 j = 0;
for (j = 0; j < HW_BAND_MAX; j++) {
for (i = 0; i < PHL_MAX_PPDU_CNT; i++) {
if (phl_com->ppdu_sts_info.sts_ent[j][i].frames.cnt != 0) {
PHL_INFO("[Error] deinit_ppdu_sts : frame queue is not empty\n");
}
pq_deinit(phl_com->drv_priv,
&(phl_com->ppdu_sts_info.sts_ent[j][i].frames));
}
}
#else
return;
#endif
}
void _phl_com_init_ppdu_sts(struct rtw_phl_com_t *phl_com)
{
#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
u8 i = 0;
#endif
u8 j = 0;
for (j = 0; j < HW_BAND_MAX; j++) {
phl_com->ppdu_sts_info.cur_rx_ppdu_cnt[j] = 0xFF;
}
#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
/* Default enable when compile flag is set. */
phl_com->ppdu_sts_info.en_psts_per_pkt = true;
/**
* Filter for buffering pkts for phy status:
* if the corresponding bit is set to 1,
* the pkt will be buffered until its ppdu status or the next ppdu is processed.
**/
phl_com->ppdu_sts_info.ppdu_sts_filter =
RTW_PHL_PSTS_FLTR_MGNT | RTW_PHL_PSTS_FLTR_CTRL |
RTW_PHL_PSTS_FLTR_EXT_RSVD;
/* if set to false, only the first mpdu in ppdu has phy status */
phl_com->ppdu_sts_info.psts_ampdu = false;
phl_com->ppdu_sts_info.en_fake_psts = false;
for (j = 0; j < HW_BAND_MAX; j++) {
for (i = 0; i < PHL_MAX_PPDU_CNT; i++) {
pq_init(phl_com->drv_priv,
&(phl_com->ppdu_sts_info.sts_ent[j][i].frames));
}
}
#endif
#ifdef CONFIG_PHY_INFO_NTFY
phl_com->ppdu_sts_info.msg_aggr_cnt = 0;
#endif
}
static void phl_msg_entry(void* priv, struct phl_msg *msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
u8 mdl_id = MSG_MDL_ID_FIELD(msg->msg_id);
u16 evt_id = MSG_EVT_ID_FIELD(msg->msg_id);
PHL_DBG("[PHL]%s, mdl_id(%d)\n", __FUNCTION__, mdl_id);
/* dispatch received PHY msg here */
switch(mdl_id) {
case PHL_MDL_PHY_MGNT:
phl_msg_hub_phy_mgnt_evt_hdlr(phl_info, evt_id);
break;
case PHL_MDL_RX:
phl_msg_hub_rx_evt_hdlr(phl_info, evt_id, msg->inbuf, msg->inlen);
break;
case PHL_MDL_BTC:
rtw_phl_btc_hub_msg_hdl(phl_info, msg);
break;
default:
break;
}
}
static enum rtw_phl_status phl_register_msg_entry(struct phl_info_t *phl_info)
{
struct phl_msg_receiver ctx;
void *d = phl_to_drvpriv(phl_info);
u8 imr[] = {PHL_MDL_PHY_MGNT, PHL_MDL_RX, PHL_MDL_MRC, PHL_MDL_POWER_MGNT
, PHL_MDL_BTC};
_os_mem_set(d, &ctx, 0, sizeof(struct phl_msg_receiver));
ctx.incoming_evt_notify = phl_msg_entry;
ctx.priv = (void*)phl_info;
if( phl_msg_hub_register_recver((void*)phl_info,
&ctx, MSG_RECV_PHL) == RTW_PHL_STATUS_SUCCESS) {
/* PHL layer module should set IMR for receiving
desired PHY msg and handle it in phl_phy_evt_entry*/
phl_msg_hub_update_recver_mask((void*)phl_info, MSG_RECV_PHL,
imr, sizeof(imr), false);
return RTW_PHL_STATUS_SUCCESS;
}
else
return RTW_PHL_STATUS_FAILURE;
}
static enum rtw_phl_status phl_deregister_msg_entry(
struct phl_info_t *phl_info)
{
return phl_msg_hub_deregister_recver((void*)phl_info, MSG_RECV_PHL);
}
static enum rtw_phl_status phl_fw_init(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_RESOURCE;
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_fw_info_t *fw_info = &phl_com->fw_info;
FUNCIN_WSTS(phl_status);
fw_info->rom_buff = _os_mem_alloc(phl_to_drvpriv(phl_info), RTW_MAX_FW_SIZE);
if (!fw_info->rom_buff) {
PHL_ERR("%s : rom buff allocate fail!!\n", __func__);
goto mem_alloc_fail;
}
fw_info->ram_buff = _os_mem_alloc(phl_to_drvpriv(phl_info), RTW_MAX_FW_SIZE);
if (!fw_info->ram_buff) {
PHL_ERR("%s : ram buff allocate fail!!\n", __func__);
goto mem_alloc_fail;
}
#ifdef CONFIG_PHL_REUSED_FWDL_BUF
fw_info->buf = _os_mem_alloc(phl_to_drvpriv(phl_info), RTW_MAX_FW_SIZE);
/* if allocation failed, fw bin files will be read every time */
if (!fw_info->buf)
PHL_WARN("%s : buf for fw storage allocate fail!!\n", __func__);
fw_info->wow_buf = _os_mem_alloc(phl_to_drvpriv(phl_info), RTW_MAX_FW_SIZE);
/* if allocation failed, fw bin files will be read every time */
if (!fw_info->wow_buf)
PHL_WARN("%s : wow buf for wowlan fw storage allocate fail!!\n", __func__);
#endif
phl_status = RTW_PHL_STATUS_SUCCESS;
FUNCOUT_WSTS(phl_status);
mem_alloc_fail:
return phl_status;
}
static void phl_fw_deinit(struct phl_info_t *phl_info)
{
struct rtw_fw_info_t *fw_info = &phl_info->phl_com->fw_info;
if (fw_info->rom_buff)
_os_mem_free(phl_to_drvpriv(phl_info), fw_info->rom_buff,
RTW_MAX_FW_SIZE);
if (fw_info->ram_buff)
_os_mem_free(phl_to_drvpriv(phl_info), fw_info->ram_buff,
RTW_MAX_FW_SIZE);
#ifdef CONFIG_PHL_REUSED_FWDL_BUF
if (fw_info->buf)
_os_mem_free(phl_to_drvpriv(phl_info), fw_info->buf,
RTW_MAX_FW_SIZE);
if (fw_info->wow_buf)
_os_mem_free(phl_to_drvpriv(phl_info), fw_info->wow_buf,
RTW_MAX_FW_SIZE);
#endif
/* allocated in rtw_hal_ld_fw_symbol */
if (fw_info->sym_buf)
_os_mem_free(phl_to_drvpriv(phl_info), fw_info->sym_buf,
RTW_MAX_FW_SIZE);
}
static enum rtw_phl_status
phl_register_background_module_entry(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
#ifdef CONFIG_CMD_DISP
/*
* setup struct phl_module_ops & call dispr_register_module
* to register background module instance.
* call dispr_deregister_module if you need to dynamically
* deregister the instance of background module.
*/
/* 1,2,3 cmd controller section */
/* 41 ~ 70 mandatory background module section*/
#ifdef CONFIG_PHL_CMD_SER
phl_status = phl_register_ser_module(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
return phl_status;
#endif
#ifdef CONFIG_POWER_SAVE
phl_status = phl_register_ps_module(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
return phl_status;
#endif
/* 70 ~ 127 optional background module section*/
#ifdef CONFIG_PHL_CMD_BTC
phl_status = phl_register_btc_module(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
return phl_status;
#endif
phl_status = phl_register_custom_module(phl_info, HW_BAND_0);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
return phl_status;
phl_status = phl_register_led_module(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
return phl_status;
phl_status = phl_register_cmd_general(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
return phl_status;
/* 10 ~ 40 protocol, wifi role section*/
phl_status = phl_register_mrc_module(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
return phl_status;
phl_status = phl_snd_cmd_register_module(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
return phl_status;
#else
phl_status = RTW_PHL_STATUS_SUCCESS;
#endif
return phl_status;
}
static enum rtw_phl_status phl_com_init(void *drv_priv,
struct phl_info_t *phl_info,
struct rtw_ic_info *ic_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
phl_info->phl_com = _os_mem_alloc(drv_priv,
sizeof(struct rtw_phl_com_t));
if (phl_info->phl_com == NULL) {
phl_status = RTW_PHL_STATUS_RESOURCE;
PHL_ERR("alloc phl_com failed\n");
goto error_phl_com_mem;
}
phl_info->phl_com->phl_priv = phl_info;
phl_info->phl_com->drv_priv = drv_priv;
phl_info->phl_com->hci_type = ic_info->hci_type;
phl_info->phl_com->edcca_mode = RTW_EDCCA_NORMAL;
phl_sw_cap_init(phl_info->phl_com);
_os_spinlock_init(drv_priv, &phl_info->phl_com->evt_info.evt_lock);
phl_fw_init(phl_info);
#ifdef CONFIG_PHL_CHANNEL_INFO
phl_status = phl_chaninfo_init(phl_info);
if (phl_status)
goto error_phl_com_mem;
#endif /* CONFIG_PHL_CHANNEL_INFO */
_phl_com_init_rssi_stat(phl_info->phl_com);
_phl_com_init_ppdu_sts(phl_info->phl_com);
phl_status = RTW_PHL_STATUS_SUCCESS;
return phl_status;
error_phl_com_mem:
return phl_status;
}
static enum rtw_phl_status phl_hci_init(struct phl_info_t *phl_info,
struct rtw_ic_info *ic_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
phl_info->hci = _os_mem_alloc(phl_to_drvpriv(phl_info),
sizeof(struct hci_info_t));
if (phl_info->hci == NULL) {
phl_status = RTW_PHL_STATUS_RESOURCE;
goto error_hci_mem;
}
#ifdef CONFIG_USB_HCI
phl_info->hci->usb_bulkout_size = ic_info->usb_info.usb_bulkout_size;
#endif
/* init variable of hci_info_t struct */
phl_status = RTW_PHL_STATUS_SUCCESS;
error_hci_mem:
return phl_status;
}
static void phl_com_deinit(struct phl_info_t *phl_info,
struct rtw_phl_com_t *phl_com)
{
void *drv_priv = phl_to_drvpriv(phl_info);
/* deinit variable or stop mechanism. */
if (phl_com) {
phl_sw_cap_deinit(phl_info->phl_com);
_os_spinlock_free(drv_priv, &phl_com->evt_info.evt_lock);
_phl_com_deinit_rssi_stat(phl_info->phl_com);
_phl_com_deinit_ppdu_sts(phl_info->phl_com);
phl_fw_deinit(phl_info);
#ifdef CONFIG_PHL_CHANNEL_INFO
phl_chaninfo_deinit(phl_info);
#endif /* CONFIG_PHL_CHANNEL_INFO */
_os_mem_free(drv_priv, phl_com, sizeof(struct rtw_phl_com_t));
}
}
static void phl_hci_deinit(struct phl_info_t *phl_info, struct hci_info_t *hci)
{
/* deinit variable or stop mechanism. */
if (hci)
_os_mem_free(phl_to_drvpriv(phl_info), hci,
sizeof(struct hci_info_t));
}
static enum rtw_phl_status _phl_hci_ops_check(struct phl_info_t *phl_info)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_hci_trx_ops *trx_ops = phl_info->hci_trx_ops;
if (!trx_ops->hci_trx_init) {
phl_ops_error_msg("hci_trx_init");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->hci_trx_deinit) {
phl_ops_error_msg("hci_trx_deinit");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->prepare_tx) {
phl_ops_error_msg("prepare_tx");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->recycle_rx_buf) {
phl_ops_error_msg("recycle_rx_buf");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->tx) {
phl_ops_error_msg("tx");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->rx) {
phl_ops_error_msg("rx");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->trx_cfg) {
phl_ops_error_msg("trx_cfg");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->pltfm_tx) {
phl_ops_error_msg("pltfm_tx");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->alloc_h2c_pkt_buf) {
phl_ops_error_msg("alloc_h2c_pkt_buf");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->free_h2c_pkt_buf) {
phl_ops_error_msg("free_h2c_pkt_buf");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->trx_reset) {
phl_ops_error_msg("trx_reset");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->trx_resume) {
phl_ops_error_msg("trx_resume");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->req_tx_stop) {
phl_ops_error_msg("req_tx_stop");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->req_rx_stop) {
phl_ops_error_msg("req_rx_stop");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->is_tx_pause) {
phl_ops_error_msg("is_tx_pause");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->is_rx_pause) {
phl_ops_error_msg("is_rx_pause");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->get_txbd_buf) {
phl_ops_error_msg("get_txbd_buf");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->get_rxbd_buf) {
phl_ops_error_msg("get_rxbd_buf");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->recycle_rx_pkt) {
phl_ops_error_msg("recycle_rx_pkt");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->register_trx_hdlr) {
phl_ops_error_msg("register_trx_hdlr");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->rx_handle_normal) {
phl_ops_error_msg("rx_handle_normal");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->tx_watchdog) {
phl_ops_error_msg("tx_watchdog");
status = RTW_PHL_STATUS_FAILURE;
}
#ifdef CONFIG_PCI_HCI
if (!trx_ops->recycle_busy_wd) {
phl_ops_error_msg("recycle_busy_wd");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->recycle_busy_h2c) {
phl_ops_error_msg("recycle_busy_h2c");
status = RTW_PHL_STATUS_FAILURE;
}
#endif
#ifdef CONFIG_USB_HCI
if (!trx_ops->pend_rxbuf) {
phl_ops_error_msg("pend_rxbuf");
status = RTW_PHL_STATUS_FAILURE;
}
if (!trx_ops->recycle_tx_buf) {
phl_ops_error_msg("recycle_tx_buf");
status = RTW_PHL_STATUS_FAILURE;
}
#endif
return status;
}
static enum rtw_phl_status phl_set_hci_ops(struct phl_info_t *phl_info)
{
#ifdef CONFIG_PCI_HCI
if (phl_get_hci_type(phl_info->phl_com) == RTW_HCI_PCIE)
phl_hook_trx_ops_pci(phl_info);
#endif
#ifdef CONFIG_USB_HCI
if (phl_get_hci_type(phl_info->phl_com) == RTW_HCI_USB)
phl_hook_trx_ops_usb(phl_info);
#endif
#ifdef CONFIG_SDIO_HCI
if (phl_get_hci_type(phl_info->phl_com) == RTW_HCI_SDIO)
phl_hook_trx_ops_sdio(phl_info);
#endif
return _phl_hci_ops_check(phl_info);
}
#ifdef CONFIG_FSM
static enum rtw_phl_status phl_cmd_init(struct phl_info_t *phl_info)
{
if (phl_info->cmd_fsm != NULL)
return RTW_PHL_STATUS_FAILURE;
phl_info->cmd_fsm = phl_cmd_new_fsm(phl_info->fsm_root, phl_info);
if (phl_info->cmd_fsm == NULL)
return RTW_PHL_STATUS_FAILURE;
if (phl_info->cmd_obj != NULL)
goto obj_fail;
phl_info->cmd_obj = phl_cmd_new_obj(phl_info->cmd_fsm, phl_info);
if (phl_info->cmd_obj == NULL)
goto obj_fail;
return RTW_PHL_STATUS_SUCCESS;
obj_fail:
phl_fsm_deinit_fsm(phl_info->cmd_fsm);
phl_info->cmd_fsm = NULL;
return RTW_PHL_STATUS_FAILURE;
}
static void phl_cmd_deinit(struct phl_info_t *phl_info)
{
phl_cmd_destory_obj(phl_info->cmd_obj);
phl_info->cmd_obj = NULL;
phl_cmd_destory_fsm(phl_info->cmd_fsm);
phl_info->cmd_fsm = NULL;
}
static enum rtw_phl_status phl_ser_init(struct phl_info_t *phl_info)
{
if (phl_info->ser_fsm != NULL)
return RTW_PHL_STATUS_FAILURE;
phl_info->ser_fsm = phl_ser_new_fsm(phl_info->fsm_root, phl_info);
if (phl_info->ser_fsm == NULL)
return RTW_PHL_STATUS_FAILURE;
if (phl_info->ser_obj != NULL)
goto obj_fail;
phl_info->ser_obj = phl_ser_new_obj(phl_info->ser_fsm, phl_info);
if (phl_info->ser_obj == NULL)
goto obj_fail;
return RTW_PHL_STATUS_SUCCESS;
obj_fail:
phl_ser_destory_fsm(phl_info->ser_fsm);
phl_info->ser_fsm = NULL;
return RTW_PHL_STATUS_FAILURE;
}
static void phl_ser_deinit(struct phl_info_t *phl_info)
{
phl_ser_destory_obj(phl_info->ser_obj);
phl_info->ser_obj = NULL;
phl_ser_destory_fsm(phl_info->ser_fsm);
phl_info->ser_fsm = NULL;
}
static enum rtw_phl_status phl_btc_init(struct phl_info_t *phl_info)
{
if (phl_info->btc_fsm != NULL)
return RTW_PHL_STATUS_FAILURE;
phl_info->btc_fsm = phl_btc_new_fsm(phl_info->fsm_root, phl_info);
if (phl_info->btc_fsm == NULL)
return RTW_PHL_STATUS_FAILURE;
phl_info->btc_obj = phl_btc_new_obj(phl_info->btc_fsm, phl_info);
if (phl_info->btc_obj == NULL)
goto obj_fail;
return RTW_PHL_STATUS_SUCCESS;
obj_fail:
phl_fsm_deinit_fsm(phl_info->btc_fsm);
phl_info->btc_fsm = NULL;
return RTW_PHL_STATUS_FAILURE;
}
static void phl_btc_deinit(struct phl_info_t *phl_info)
{
phl_btc_destory_obj(phl_info->btc_obj);
phl_info->btc_obj = NULL;
phl_btc_destory_fsm(phl_info->btc_fsm);
phl_info->btc_fsm = NULL;
}
static enum rtw_phl_status phl_scan_init(struct phl_info_t *phl_info)
{
if (phl_info->scan_fsm != NULL)
return RTW_PHL_STATUS_FAILURE;
phl_info->scan_fsm = phl_scan_new_fsm(phl_info->fsm_root, phl_info);
if (phl_info->scan_fsm == NULL)
return RTW_PHL_STATUS_FAILURE;
if (phl_info->scan_obj != NULL)
goto obj_fail;
phl_info->scan_obj = phl_scan_new_obj(phl_info->scan_fsm, phl_info);
if (phl_info->scan_obj == NULL)
goto obj_fail;
return RTW_PHL_STATUS_SUCCESS;
obj_fail:
phl_fsm_deinit_fsm(phl_info->scan_fsm);
phl_info->scan_fsm = NULL;
return RTW_PHL_STATUS_FAILURE;
}
static void phl_scan_deinit(struct phl_info_t *phl_info)
{
phl_scan_destory_obj(phl_info->scan_obj);
phl_info->scan_obj = NULL;
phl_scan_destory_fsm(phl_info->scan_fsm);
phl_info->scan_fsm = NULL;
}
static enum rtw_phl_status phl_sound_init(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
if (phl_info->snd_fsm!= NULL)
return RTW_PHL_STATUS_FAILURE;
phl_info->snd_fsm = phl_sound_new_fsm(phl_info->fsm_root, phl_info);
if (phl_info->snd_fsm == NULL)
return RTW_PHL_STATUS_FAILURE;
pstatus = phl_snd_new_obj(phl_info->snd_fsm, phl_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
goto obj_fail;
return pstatus;
obj_fail:
phl_fsm_deinit_fsm(phl_info->snd_fsm);
phl_info->snd_fsm = NULL;
return RTW_PHL_STATUS_FAILURE;
}
static void phl_sound_deinit(struct phl_info_t *phl_info)
{
phl_snd_destory_obj(phl_info->snd_obj);
phl_info->snd_obj = NULL;
phl_snd_destory_fsm(phl_info->snd_fsm);
phl_info->snd_fsm = NULL;
}
static enum rtw_phl_status phl_fsm_init(struct phl_info_t *phl_info)
{
if (phl_info->fsm_root != NULL)
return RTW_PHL_STATUS_FAILURE;
/* allocate memory for fsm to do version control */
phl_info->fsm_root = phl_fsm_init_root(phl_info);
if (phl_info->fsm_root == NULL)
return RTW_PHL_STATUS_FAILURE;
return RTW_PHL_STATUS_SUCCESS;
}
static void phl_fsm_deinit(struct phl_info_t *phl_info)
{
/* free memory for fsm */
phl_fsm_deinit_root(phl_info->fsm_root);
phl_info->fsm_root = NULL;
}
static enum rtw_phl_status phl_fsm_module_init(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
phl_status = phl_cmd_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_cmd_init failed\n");
goto cmd_fail;
}
phl_status = phl_ser_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_ser_init failed\n");
goto ser_fail;
}
phl_status = phl_btc_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_btc_init failed\n");
goto btc_fail;
}
phl_status = phl_scan_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_scan_init failed\n");
goto scan_fail;
}
phl_status = phl_sound_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_sound_init failed\n");
goto sound_fail;
}
return phl_status;
sound_fail:
phl_scan_deinit(phl_info);
scan_fail:
phl_btc_deinit(phl_info);
btc_fail:
phl_ser_deinit(phl_info);
ser_fail:
phl_cmd_deinit(phl_info);
cmd_fail:
return phl_status;
}
static void phl_fsm_module_deinit(struct phl_info_t *phl_info)
{
phl_sound_deinit(phl_info);
phl_scan_deinit(phl_info);
phl_btc_deinit(phl_info);
phl_ser_deinit(phl_info);
phl_cmd_deinit(phl_info);
}
static enum rtw_phl_status phl_fsm_start(struct phl_info_t *phl_info)
{
return phl_fsm_start_root(phl_info->fsm_root);
}
static enum rtw_phl_status phl_fsm_stop(struct phl_info_t *phl_info)
{
return phl_fsm_stop_root(phl_info->fsm_root);
}
static enum rtw_phl_status phl_fsm_module_start(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
phl_status = phl_fsm_start_fsm(phl_info->ser_fsm);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto ser_fail;
phl_status = phl_btc_start(phl_info->btc_obj);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto btc_fail;
phl_status = phl_fsm_start_fsm(phl_info->scan_fsm);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto scan_fail;
phl_status = phl_cmd_start(phl_info->cmd_obj);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto cmd_fail;
phl_status = phl_fsm_start_fsm(phl_info->snd_fsm);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto snd_fail;
return phl_status;
snd_fail:
phl_fsm_stop_fsm(phl_info->cmd_fsm);
phl_fsm_stop_fsm(phl_info->scan_fsm);
scan_fail:
phl_fsm_stop_fsm(phl_info->btc_fsm);
btc_fail:
phl_fsm_stop_fsm(phl_info->ser_fsm);
ser_fail:
phl_fsm_cmd_stop(phl_info);
cmd_fail:
return phl_status;
}
static enum rtw_phl_status phl_fsm_module_stop(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
phl_fsm_stop_fsm(phl_info->snd_fsm);
phl_fsm_stop_fsm(phl_info->scan_fsm);
phl_fsm_stop_fsm(phl_info->btc_fsm);
phl_fsm_stop_fsm(phl_info->ser_fsm);
phl_fsm_cmd_stop(phl_info);
return phl_status;
}
#endif /*CONFIG_FSM*/
static enum rtw_phl_status phl_module_init(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
phl_status = phl_msg_hub_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_msg_hub_init failed\n");
goto msg_hub_fail;
}
phl_status = phl_wow_mdl_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_wow_mdl_init failed\n");
goto wow_init_fail;
}
phl_status = phl_pkt_ofld_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_pkt_ofld_init failed\n");
goto pkt_ofld_init_fail;
}
if (!phl_test_module_init(phl_info)) {
PHL_ERR("phl_test_module_init failed\n");
phl_status = RTW_PHL_STATUS_FAILURE;
goto error_test_module_init;
}
phl_status = phl_p2pps_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_p2pps_init failed\n");
goto error_p2pps_init;
}
phl_status = phl_disp_eng_init(phl_info, HW_BAND_MAX);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_disp_eng_init failed\n");
goto error_disp_eng_init;
}
phl_status = phl_register_background_module_entry(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_register_disp_eng_module_entry failed\n");
goto error_disp_eng_reg_init;
}
phl_status = phl_ecsa_ctrl_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_ecsa_ctrl_init failed\n");
goto error_ecsa_ctrl_init;
}
return phl_status;
error_ecsa_ctrl_init:
error_disp_eng_reg_init:
phl_disp_eng_deinit(phl_info);
error_disp_eng_init:
phl_p2pps_deinit(phl_info);
error_p2pps_init:
phl_test_module_deinit(phl_info->phl_com);
error_test_module_init:
phl_pkt_ofld_deinit(phl_info);
pkt_ofld_init_fail:
phl_wow_mdl_deinit(phl_info);
wow_init_fail:
phl_msg_hub_deinit(phl_info);
msg_hub_fail:
return phl_status;
}
static void phl_module_deinit(struct phl_info_t *phl_info)
{
phl_ecsa_ctrl_deinit(phl_info);
phl_disp_eng_deinit(phl_info);
phl_test_module_deinit(phl_info->phl_com);
phl_pkt_ofld_deinit(phl_info);
phl_wow_mdl_deinit(phl_info);
phl_msg_hub_deinit(phl_info);
phl_p2pps_deinit(phl_info);
}
static enum rtw_phl_status phl_module_start(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
if (!phl_test_module_start(phl_info->phl_com)) {
PHL_ERR("phl_test_module_start failed\n");
phl_status = RTW_PHL_STATUS_FAILURE;
goto error_test_mdl_start;
}
phl_status = phl_disp_eng_start(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_disp_eng_start failed\n");
goto error_disp_eng_start;
}
if(phl_info->msg_hub) {
phl_msg_hub_start(phl_info);
phl_register_msg_entry(phl_info);
}
return phl_status;
error_disp_eng_start:
phl_test_module_stop(phl_info->phl_com);
error_test_mdl_start:
return phl_status;
}
static enum rtw_phl_status phl_module_stop(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
phl_status = phl_cmd_enqueue(phl_info, HW_BAND_0, MSG_EVT_MDL_CHECK_STOP,
NULL, 0, NULL, PHL_CMD_WAIT, 500);
phl_disp_eng_stop(phl_info);
phl_test_module_stop(phl_info->phl_com);
if(phl_info->msg_hub) {
phl_deregister_msg_entry(phl_info);
phl_msg_hub_stop(phl_info);
}
return phl_status;
}
static enum rtw_phl_status phl_var_init(struct phl_info_t *phl_info)
{
return RTW_PHL_STATUS_SUCCESS;
}
static void phl_var_deinit(struct phl_info_t *phl_info)
{
}
struct rtw_phl_com_t *rtw_phl_get_com(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return phl_info->phl_com;
}
static void phl_regulation_init(void *drv_priv, void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
if (!drv_priv || !phl)
return;
rg = &phl_info->regulation;
_os_spinlock_init(drv_priv, &rg->lock);
rg->init = 1;
rg->domain.code = INVALID_DOMAIN_CODE;
rg->domain_6g.code = INVALID_DOMAIN_CODE;
rg->tpo = TPO_NA;
}
static void phl_regulation_deinit(void *drv_priv, void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
if (!drv_priv || !phl)
return;
rg = &phl_info->regulation;
_os_spinlock_free(drv_priv, &rg->lock);
}
enum rtw_phl_status rtw_phl_init(void *drv_priv, void **phl,
struct rtw_ic_info *ic_info)
{
struct phl_info_t *phl_info = NULL;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
FUNCIN();
phl_info = _os_mem_alloc(drv_priv, sizeof(struct phl_info_t));
if (phl_info == NULL) {
phl_status = RTW_PHL_STATUS_RESOURCE;
PHL_ERR("alloc phl_info failed\n");
goto error_phl_mem;
}
_os_mem_set(drv_priv, phl_info, 0, sizeof(struct phl_info_t));
*phl = phl_info;
phl_regulation_init(drv_priv, phl_info);
phl_status = phl_com_init(drv_priv, phl_info, ic_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
phl_status = RTW_PHL_STATUS_RESOURCE;
PHL_ERR("alloc phl_com failed\n");
goto error_phl_com_mem;
}
phl_status = phl_hci_init(phl_info, ic_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_hci_init failed\n");
goto error_hci_init;
}
phl_status = phl_set_hci_ops(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_set_hci_ops failed\n");
goto error_set_hci_ops;
}
#ifdef CONFIG_FSM
phl_status = phl_fsm_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_fsm_init failed\n");
goto error_fsm_init;
}
/* init FSM modules */
phl_status = phl_fsm_module_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_fsm_module_init failed\n");
goto error_fsm_module_init;
}
#endif
phl_status = phl_twt_init(*phl);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_twt_init failed\n");
goto error_phl_twt_init;
}
hal_status = rtw_hal_init(drv_priv, phl_info->phl_com,
&(phl_info->hal), ic_info->ic_id);
if ((hal_status != RTW_HAL_STATUS_SUCCESS) || (phl_info->hal == NULL)) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
PHL_ERR("rtw_hal_init failed status(%d),phl_info->hal(%p)\n",
hal_status, phl_info->hal);
goto error_hal_init;
}
/*send bus info to hal*/
rtw_hal_hci_cfg(phl_info->phl_com, phl_info->hal, ic_info);
/*get hw capability from mac/bb/rf/btc/efuse/fw-defeature-rpt*/
hal_status = rtw_hal_read_chip_info(phl_info->phl_com, phl_info->hal);
if (hal_status != RTW_HAL_STATUS_SUCCESS) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
PHL_ERR("rtw_hal_read_chip_info failed\n");
goto error_hal_read_chip_info;
}
hal_status = rtw_hal_var_init(phl_info->phl_com, phl_info->hal);
if (hal_status != RTW_HAL_STATUS_SUCCESS) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
PHL_ERR("rtw_hal_var_init failed\n");
goto error_hal_var_init;
}
phl_status = phl_var_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_var_init failed\n");
goto error_phl_var_init;
}
/* init mr_ctrl, wifi_role[] */
phl_status = phl_mr_ctrl_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_mr_ctrl_init failed\n");
goto error_wifi_role_ctrl_init;
}
/* init modules */
phl_status = phl_module_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_module_init failed\n");
goto error_module_init;
}
/* init macid_ctrl , stainfo_ctrl*/
/* init after get hw cap - macid number*/
phl_status = phl_macid_ctrl_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_macid_ctrl_init failed\n");
goto error_macid_ctrl_init;
}
/*init after hal_init - hal_sta_info*/
phl_status = phl_stainfo_ctrl_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_stainfo_ctrl_init failed\n");
goto error_stainfo_ctrl_init;
}
FUNCOUT();
return phl_status;
error_stainfo_ctrl_init:
phl_macid_ctrl_deinit(phl_info);
error_macid_ctrl_init:
phl_module_deinit(phl_info);
error_module_init:
phl_mr_ctrl_deinit(phl_info);
error_wifi_role_ctrl_init:
phl_var_deinit(phl_info);
error_phl_var_init:
error_hal_var_init:
error_hal_read_chip_info:
rtw_hal_deinit(phl_info->phl_com, phl_info->hal);
error_hal_init:
error_phl_twt_init:
phl_twt_deinit(phl);
#ifdef CONFIG_FSM
phl_fsm_module_deinit(phl_info);
error_fsm_module_init:
phl_fsm_deinit(phl_info);
error_fsm_init:
/* Do nothing */
#endif
error_set_hci_ops:
phl_hci_deinit(phl_info, phl_info->hci);
error_hci_init:
phl_com_deinit(phl_info, phl_info->phl_com);
error_phl_com_mem:
if (phl_info) {
phl_regulation_deinit(drv_priv, phl_info);
_os_mem_free(drv_priv, phl_info, sizeof(struct phl_info_t));
*phl = phl_info = NULL;
}
error_phl_mem:
return phl_status;
}
void rtw_phl_deinit(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
if (phl_info) {
phl_twt_deinit(phl);
phl_stainfo_ctrl_deinie(phl_info);
phl_macid_ctrl_deinit(phl_info);
/*deinit mr_ctrl, wifi_role[]*/
phl_module_deinit(phl_info);
phl_mr_ctrl_deinit(phl_info);
rtw_hal_deinit(phl_info->phl_com, phl_info->hal);
phl_var_deinit(phl_info);
#ifdef CONFIG_FSM
phl_fsm_module_deinit(phl_info);
phl_fsm_deinit(phl_info);
#endif
phl_hci_deinit(phl_info, phl_info->hci);
phl_com_deinit(phl_info, phl_info->phl_com);
phl_regulation_deinit(drv_priv, phl_info);
_os_mem_free(drv_priv, phl_info,
sizeof(struct phl_info_t));
}
}
enum rtw_phl_status
rtw_phl_trx_alloc(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
phl_status = phl_datapath_init(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_datapath_init failed\n");
goto error_datapath;
}
phl_status = phl_trx_test_init(phl);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_trx_test_init failed\n");
goto error_trx_test;
}
return phl_status;
error_trx_test:
phl_datapath_deinit(phl_info);
error_datapath:
return phl_status;
}
void
rtw_phl_trx_free_handler(void *phl)
{
phl_trx_free_handler(phl);
}
void
rtw_phl_trx_free_sw_rsc(void *phl)
{
phl_trx_free_sw_rsc(phl);
phl_trx_test_deinit(phl);
}
void
rtw_phl_trx_free(void *phl)
{
rtw_phl_trx_free_handler(phl);
rtw_phl_trx_free_sw_rsc(phl);
}
bool rtw_phl_is_init_completed(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_is_inited(phl_info->phl_com, phl_info->hal);
}
#ifdef RTW_PHL_BCN
enum rtw_phl_status
phl_add_beacon(struct phl_info_t *phl_info, struct rtw_bcn_info_cmn *bcn_cmn)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
void *hal = phl_info->hal;
if(rtw_hal_add_beacon(phl_com, hal, bcn_cmn) == RTW_HAL_STATUS_SUCCESS)
return RTW_PHL_STATUS_SUCCESS;
else
return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status phl_update_beacon(struct phl_info_t *phl_info, u8 bcn_id)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
void *hal = phl_info->hal;
if(rtw_hal_update_beacon(phl_com, hal, bcn_id) == RTW_HAL_STATUS_SUCCESS)
return RTW_PHL_STATUS_SUCCESS;
else
return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status rtw_phl_free_bcn_entry(void *phl, struct rtw_wifi_role_t *wrole)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_bcn_info_cmn *bcn_cmn = &wrole->bcn_cmn;
void *hal = phl_info->hal;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
if (bcn_cmn->bcn_added == 1) {
if (rtw_hal_free_beacon(phl_com, hal, bcn_cmn->bcn_id) == RTW_HAL_STATUS_SUCCESS) {
bcn_cmn->bcn_added = 0;
phl_status = RTW_PHL_STATUS_SUCCESS;
} else {
phl_status = RTW_PHL_STATUS_FAILURE;
}
}
return phl_status;
}
enum rtw_phl_status
phl_beacon_stop(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole, u8 stop)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
hstatus = rtw_hal_beacon_stop(phl_info->hal, wrole, stop);
if (hstatus != RTW_HAL_STATUS_SUCCESS)
pstatus = RTW_PHL_STATUS_FAILURE;
return pstatus;
}
enum rtw_phl_status
phl_issue_beacon(struct phl_info_t *phl_info, struct rtw_bcn_info_cmn *bcn_cmn)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_bcn_info_cmn *wrole_bcn_cmn;
struct rtw_wifi_role_t *wifi_role;
void *drv = phl_com->drv_priv;
u8 bcn_id, role_idx, bcn_added;
role_idx = bcn_cmn->role_idx;
if (role_idx >= MAX_WIFI_ROLE_NUMBER) {
PHL_ERR("%s: role idx err(%d)\n", __func__, role_idx);
return RTW_PHL_STATUS_FAILURE;
}
wifi_role = &phl_com->wifi_roles[role_idx];
wrole_bcn_cmn = &wifi_role->bcn_cmn;
bcn_added = wrole_bcn_cmn->bcn_added;
_os_mem_cpy(drv, wrole_bcn_cmn, bcn_cmn, sizeof(struct rtw_bcn_info_cmn));
/* BCN add */
if (!bcn_added) {
if(phl_add_beacon(phl_info, wrole_bcn_cmn) == RTW_PHL_STATUS_SUCCESS) {
wrole_bcn_cmn->bcn_added = true;
return RTW_PHL_STATUS_SUCCESS;
} else {
return RTW_PHL_STATUS_FAILURE;
}
} else {
/* BCN update */
bcn_id = wrole_bcn_cmn->bcn_id;
if(phl_update_beacon(phl_info, bcn_id) == RTW_PHL_STATUS_SUCCESS)
return RTW_PHL_STATUS_SUCCESS;
else
return RTW_PHL_STATUS_FAILURE;
}
}
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_cmd_issue_bcn_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct rtw_bcn_info_cmn *bcn_cmn = (struct rtw_bcn_info_cmn *)param;
return phl_issue_beacon(phl_info, bcn_cmn);
}
static void _phl_issue_bcn_done(void *drv_priv, u8 *buf, u32 buf_len,
enum rtw_phl_status status)
{
if (buf) {
_os_kmem_free(drv_priv, buf, buf_len);
buf = NULL;
PHL_INFO("%s.....\n", __func__);
}
}
enum rtw_phl_status
rtw_phl_cmd_issue_beacon(void *phl,
struct rtw_wifi_role_t *wifi_role,
struct rtw_bcn_info_cmn *bcn_cmn,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv = wifi_role->phl_com->drv_priv;
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct rtw_bcn_info_cmn *param = NULL;
u32 param_len;
if (cmd_type == PHL_CMD_DIRECTLY) {
psts = phl_issue_beacon(phl_info, bcn_cmn);
goto _exit;
}
param_len = sizeof(struct rtw_bcn_info_cmn);
param = _os_kmem_alloc(drv, param_len);
if (param == NULL) {
PHL_ERR("%s: alloc param failed!\n", __func__);
goto _exit;
}
_os_mem_cpy(drv, param, bcn_cmn, param_len);
psts = phl_cmd_enqueue(phl_info,
wifi_role->hw_band,
MSG_EVT_ISSUE_BCN,
(u8 *)param, param_len,
_phl_issue_bcn_done,
cmd_type, cmd_timeout);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
_os_kmem_free(phl_to_drvpriv(phl_info), param, param_len);
psts = RTW_PHL_STATUS_FAILURE;
}
_exit:
return psts;
}
struct stop_bcn_param {
struct rtw_wifi_role_t *wrole;
u8 stop;
};
enum rtw_phl_status
phl_cmd_stop_bcn_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct stop_bcn_param *bcn_param = (struct stop_bcn_param *)param;
return phl_beacon_stop(phl_info, bcn_param->wrole, bcn_param->stop);
}
static void _phl_stop_bcn_done(void *drv_priv, u8 *buf, u32 buf_len,
enum rtw_phl_status status)
{
if (buf) {
_os_kmem_free(drv_priv, buf, buf_len);
buf = NULL;
PHL_INFO("%s.....\n", __func__);
}
}
enum rtw_phl_status
rtw_phl_cmd_stop_beacon(void *phl,
struct rtw_wifi_role_t *wifi_role,
u8 stop,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv = wifi_role->phl_com->drv_priv;
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct stop_bcn_param *param = NULL;
u32 param_len;
if (cmd_type == PHL_CMD_DIRECTLY) {
psts = phl_beacon_stop(phl_info, wifi_role, stop);
goto _exit;
}
param_len = sizeof(struct stop_bcn_param);
param = _os_kmem_alloc(drv, param_len);
if (param == NULL) {
PHL_ERR("%s: alloc param failed!\n", __func__);
goto _exit;
}
param->wrole = wifi_role;
param->stop = stop;
psts = phl_cmd_enqueue(phl_info,
wifi_role->hw_band,
MSG_EVT_STOP_BCN,
(u8 *)param, param_len,
_phl_stop_bcn_done,
cmd_type, cmd_timeout);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
_os_kmem_free(phl_to_drvpriv(phl_info), param, param_len);
psts = RTW_PHL_STATUS_FAILURE;
}
_exit:
return psts;
}
#else /*for FSM*/
enum rtw_phl_status
rtw_phl_cmd_stop_beacon(void *phl,
struct rtw_wifi_role_t *wifi_role,
u8 stop,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return phl_beacon_stop(phl_info, wifi_role, stop);
}
enum rtw_phl_status
rtw_phl_cmd_issue_beacon(void *phl,
struct rtw_wifi_role_t *wifi_role,
struct rtw_bcn_info_cmn *bcn_cmn,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return phl_issue_beacon(phl_info, bcn_cmn);
}
#endif /*CONFIG_CMD_DISP*/
#endif /*RTW_PHL_BCN*/
void rtw_phl_cap_pre_config(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
/* FW Pre-config */
rtw_hal_fw_cap_pre_config(phl_info->phl_com,phl_info->hal);
/* Bus Pre-config */
rtw_hal_bus_cap_pre_config(phl_info->phl_com,phl_info->hal);
}
enum rtw_phl_status rtw_phl_preload(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
#ifdef RTW_WKARD_PRELOAD_TRX_RESET
struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
#endif
FUNCIN();
hal_status = rtw_hal_preload(phl_info->phl_com, phl_info->hal);
#ifdef RTW_WKARD_PRELOAD_TRX_RESET
ops->trx_reset(phl_info, PHL_CTRL_TX|PHL_CTRL_RX);
#endif
if (hal_status != RTW_HAL_STATUS_SUCCESS)
return RTW_PHL_STATUS_FAILURE;
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status rtw_phl_start(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *evt_ops = &phl_info->phl_com->evt_ops;
#endif /* CONFIG_SYNC_INTERRUPT */
hal_status = rtw_hal_start(phl_info->phl_com, phl_info->hal);
if (hal_status == RTW_HAL_STATUS_MAC_INIT_FAILURE) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
} else if (hal_status == RTW_HAL_STATUS_BB_INIT_FAILURE) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
} else if (hal_status == RTW_HAL_STATUS_RF_INIT_FAILURE) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
} else if (hal_status == RTW_HAL_STATUS_BTC_INIT_FAILURE) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
} else if (hal_status != RTW_HAL_STATUS_SUCCESS) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
}
#ifdef CONFIG_LOAD_PHY_PARA_FROM_FILE
/* if no need keep para buf, phl_com->dev_sw_cap->keep_para_info = false*/
rtw_phl_init_free_para_buf(phl_info->phl_com);
#endif
#ifdef CONFIG_FSM
/* start FSM framework */
phl_status = phl_fsm_start(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto error_phl_fsm_start;
/* start FSM modules */
phl_status = phl_fsm_module_start(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto error_phl_fsm_module_start;
#endif
/* start modules */
phl_status = phl_module_start(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto error_phl_module_start;
phl_status = phl_datapath_start(phl_info);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
goto error_phl_datapath_start;
#ifdef CONFIG_SYNC_INTERRUPT
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), true);
#else
rtw_hal_enable_interrupt(phl_info->phl_com, phl_info->hal);
#endif /* CONFIG_SYNC_INTERRUPT */
phl_info->phl_com->dev_state = RTW_DEV_WORKING;
phl_status = RTW_PHL_STATUS_SUCCESS;
return phl_status;
error_phl_datapath_start:
phl_module_stop(phl_info);
error_phl_module_start:
#ifdef CONFIG_FSM
phl_fsm_module_stop(phl_info);
error_phl_fsm_module_start:
phl_fsm_stop(phl_info);
error_phl_fsm_start:
#endif
rtw_hal_stop(phl_info->phl_com, phl_info->hal);
error_hal_start:
return phl_status;
}
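/* Typical bring-up / tear-down ordering of the exported entry points in this
 * file, as suggested by the init/start pairs above (illustrative only):
 *   rtw_phl_init() -> rtw_phl_trx_alloc() -> rtw_phl_cap_pre_config() ->
 *   rtw_phl_preload() -> rtw_phl_start()
 * and, in reverse, on shutdown:
 *   rtw_phl_stop() -> rtw_phl_trx_free() -> rtw_phl_deinit()
 */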
static void _phl_interrupt_stop(struct phl_info_t *phl_info)
{
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *evt_ops = &phl_info->phl_com->evt_ops;
do {
if (false == TEST_STATUS_FLAG(phl_info->phl_com->dev_state,
RTW_DEV_SURPRISE_REMOVAL))
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), false);
} while (false);
#else
do {
if (false == TEST_STATUS_FLAG(phl_info->phl_com->dev_state,
RTW_DEV_SURPRISE_REMOVAL))
rtw_hal_disable_interrupt(phl_info->phl_com, phl_info->hal);
} while (false);
#endif /* CONFIG_SYNC_INTERRUPT */
}
static enum rtw_phl_status _phl_cmd_send_msg_phy_on(struct phl_info_t *phl_info)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
sts = phl_cmd_enqueue(phl_info, HW_BAND_0, MSG_EVT_PHY_ON, NULL, 0, NULL,
PHL_CMD_WAIT, 1000);
if (is_cmd_failure(sts)) {
/* Send cmd success, but wait cmd fail*/
sts = RTW_PHL_STATUS_FAILURE;
} else if (sts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
sts = RTW_PHL_STATUS_FAILURE;
}
return sts;
}
void rtw_phl_stop(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
_phl_cmd_send_msg_phy_on(phl_info);
_phl_interrupt_stop(phl_info);
phl_module_stop(phl_info);
#ifdef DBG_PHL_MR
phl_mr_info_dbg(phl_info);
#endif
#ifdef CONFIG_FSM
phl_fsm_module_stop(phl_info);
phl_fsm_stop(phl_info);
#endif
rtw_hal_stop(phl_info->phl_com, phl_info->hal);
phl_datapath_stop(phl_info);
phl_info->phl_com->dev_state = 0;
}
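/*
 * phl_wow_start - enter WoWLAN mode. In RTW_WOW_OP_PWR_DOWN mode the roles
 * are suspended and PHL is simply stopped; otherwise the modules are stopped
 * and the WoW pre-configuration, HAL WoW init, WoW function enable,
 * power-saving configuration and post-configuration are applied in turn.
 * On failure the HAL and datapath are stopped and op_mode falls back to
 * RTW_WOW_OP_PWR_DOWN.
 */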
enum rtw_phl_status phl_wow_start(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta)
{
#ifdef CONFIG_WOWLAN
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *evt_ops = &phl_info->phl_com->evt_ops;
#endif /* CONFIG_SYNC_INTERRUPT */
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s enter with sta state(%d)\n.", __func__, sta->wrole->mstate);
phl_wow_decide_op_mode(wow_info, sta);
if (wow_info->op_mode == RTW_WOW_OP_PWR_DOWN) {
phl_cmd_role_suspend(phl_info);
rtw_phl_stop(phl_info);
/* since the control path is stopped after rtw_phl_stop,
the actions below don't need to be moved into a general module */
hstatus = rtw_hal_set_wowlan(phl_info->phl_com, phl_info->hal, true);
if (RTW_HAL_STATUS_SUCCESS != hstatus)
PHL_WARN("[wow] rtw_hal_set_wowlan failed, status(%u)\n", hstatus);
pstatus = RTW_PHL_STATUS_SUCCESS;
} else {
/* stop all active features */
#ifdef CONFIG_WOW_WITH_SER
rtw_hal_ser_ctrl(phl_info->hal, false);
#endif
pstatus = phl_module_stop(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_ERR("[wow] phl_module_stop failed.\n");
goto end;
}
/* since the control path is stopped after phl_module_stop,
the actions below don't need to be moved into a general module */
#ifdef CONFIG_FSM
pstatus = phl_fsm_module_stop(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_ERR("[wow] phl_fsm_module_stop failed.\n");
goto end;
}
pstatus = phl_fsm_stop(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_ERR("[wow] phl_fsm_stop failed.\n");
goto end;
}
#endif
hstatus = rtw_hal_set_wowlan(phl_info->phl_com, phl_info->hal, true);
if (RTW_HAL_STATUS_SUCCESS != hstatus)
PHL_WARN("[wow] rtw_hal_set_wowlan failed, status(%u)\n", hstatus);
pstatus = phl_wow_init_precfg(wow_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_ERR("[wow] phl_wow_init_precfg failed.\n");
goto end;
}
hstatus = rtw_hal_wow_init(phl_info->phl_com, phl_info->hal, sta);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
pstatus = RTW_PHL_STATUS_FAILURE;
goto end;
}
pstatus = phl_wow_func_en(wow_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
goto end;
#ifdef CONFIG_POWER_SAVE
/* power saving */
phl_wow_ps_pctl_cfg(wow_info, true);
#endif
pstatus = phl_wow_init_postcfg(wow_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_ERR("[wow] phl_wow_init_postcfg failed.\n");
goto end;
}
#ifdef CONFIG_WOW_WITH_SER
rtw_hal_ser_ctrl(phl_info->hal, true);
#endif
#ifdef CONFIG_POWER_SAVE
/* power saving */
phl_wow_ps_pwr_cfg(wow_info, true);
#endif
pstatus = RTW_PHL_STATUS_SUCCESS;
}
end:
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
#ifdef CONFIG_SYNC_INTERRUPT
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), false);
#else
rtw_hal_disable_interrupt(phl_info->phl_com, phl_info->hal);
#endif /* CONFIG_SYNC_INTERRUPT */
rtw_hal_stop(phl_info->phl_com, phl_info->hal);
phl_datapath_stop(phl_info);
wow_info->op_mode = RTW_WOW_OP_PWR_DOWN;
PHL_ERR("[wow] %s fail, set op_mode %d!\n", __func__, wow_info->op_mode);
} else {
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_,
"[wow] %s success, with func_en %d, op_mode %d.\n",
__func__, wow_info->func_en, wow_info->op_mode);
}
return pstatus;
#else
return RTW_PHL_STATUS_SUCCESS;
#endif /* CONFIG_WOWLAN */
}
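/*
 * Recovery path used when leaving WoW fails: deinitialize the HAL, stop the
 * datapath, restart PHL from scratch and recover the suspended roles.
 */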
static void _wow_stop_reinit(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
PHL_WARN("%s : reset hw!\n", __func__);
rtw_hal_hal_deinit(phl_info->phl_com, phl_info->hal);
phl_datapath_stop(phl_info);
pstatus = rtw_phl_start(phl_info);
if (pstatus)
PHL_ERR("%s : rtw_phl_start fail!\n", __func__);
phl_cmd_role_recover(phl_info);
}
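/*
 * phl_wow_stop - leave WoWLAN mode. Depending on the MAC power state this
 * either restarts PHL (power-down case), re-initializes the whole HW for
 * unexpected states or wake reasons that require a reset, or tears down the
 * WoW configuration and restarts the PHL modules. *hw_reinit tells the
 * caller whether a full HW re-initialization took place.
 */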
void phl_wow_stop(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta, u8 *hw_reinit)
{
#ifdef CONFIG_WOWLAN
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
u8 reset = 0;
if (rtw_hal_get_pwr_state(phl_info->hal, &wow_info->mac_pwr)
!= RTW_HAL_STATUS_SUCCESS)
return;
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "%s enter with mac power %d\n.",
__func__, wow_info->mac_pwr);
if (wow_info->mac_pwr != RTW_MAC_PWR_OFF) {
#ifdef CONFIG_WOW_WITH_SER
rtw_hal_ser_ctrl(phl_info->hal, false);
#endif
#ifdef CONFIG_POWER_SAVE
/* leave clock/power gating */
pstatus = phl_wow_leave_low_power(wow_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_ERR("[wow] HW leave power saving failed.\n");
_wow_stop_reinit(phl_info);
*hw_reinit = true;
return;
}
#endif
}
hstatus = rtw_hal_set_wowlan(phl_info->phl_com, phl_info->hal, false);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
PHL_WARN("[wow] rtw_hal_set_wowlan failed, status(%u)\n", hstatus);
}
if (wow_info->mac_pwr == RTW_MAC_PWR_OFF) {
if (wow_info->op_mode == RTW_WOW_OP_PWR_DOWN) {
pstatus = rtw_phl_start(phl_info);
phl_role_recover(phl_info);
*hw_reinit = true;
} else {
PHL_WARN("[wow] enter suspend with wow enabled but mac is power down\n");
_wow_stop_reinit(phl_info);
*hw_reinit = true;
}
} else if (wow_info->mac_pwr == RTW_MAC_PWR_ON ||
wow_info->mac_pwr == RTW_MAC_PWR_LPS) {
phl_wow_handle_wake_rsn(wow_info, &reset);
if (reset) {
_wow_stop_reinit(phl_info);
*hw_reinit = true;
return;
}
phl_wow_deinit_precfg(wow_info);
rtw_hal_fw_dbg_dump(phl_info->hal, false);
#ifdef CONFIG_POWER_SAVE
/* leave power saving */
phl_wow_ps_pctl_cfg(wow_info, false);
#endif
phl_wow_func_dis(wow_info);
hstatus = rtw_hal_wow_deinit(phl_info->phl_com, phl_info->hal, sta);
if (hstatus)
PHL_ERR("%s : rtw_hal_wow_deinit failed.\n", __func__);
phl_module_start(phl_info);
#ifdef CONFIG_FSM
phl_fsm_start(phl_info);
phl_fsm_module_start(phl_info);
#endif
phl_wow_deinit_postcfg(wow_info);
#ifdef CONFIG_WOW_WITH_SER
rtw_hal_ser_ctrl(phl_info->hal, true);
#endif
*hw_reinit = false;
} else {
PHL_ERR("%s : unexpected mac pwr state %d.\n", __func__, wow_info->mac_pwr);
}
#endif /* CONFIG_WOWLAN */
}
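/*
 * rtw_phl_rf_on - power the HAL back on, recover the suspended roles,
 * re-enable interrupts and resume SW TX/RX through the data controller.
 */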
enum rtw_phl_status rtw_phl_rf_on(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *evt_ops = &phl_info->phl_com->evt_ops;
#endif /* CONFIG_SYNC_INTERRUPT */
struct phl_data_ctl_t ctl = {0};
hal_status = rtw_hal_start(phl_info->phl_com, phl_info->hal);
if (hal_status == RTW_HAL_STATUS_MAC_INIT_FAILURE) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
} else if (hal_status == RTW_HAL_STATUS_BB_INIT_FAILURE) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
} else if (hal_status == RTW_HAL_STATUS_RF_INIT_FAILURE) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
} else if (hal_status == RTW_HAL_STATUS_BTC_INIT_FAILURE) {
phl_status = RTW_PHL_STATUS_HAL_INIT_FAILURE;
goto error_hal_start;
}
phl_role_recover(phl_info);
#ifdef CONFIG_SYNC_INTERRUPT
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), true);
#else
rtw_hal_enable_interrupt(phl_info->phl_com, phl_info->hal);
#endif /* CONFIG_SYNC_INTERRUPT */
ctl.id = PHL_MDL_POWER_MGNT;
ctl.cmd = PHL_DATA_CTL_SW_TX_RESUME;
if (phl_data_ctrler(phl_info, &ctl, NULL) != RTW_PHL_STATUS_SUCCESS)
PHL_WARN("%s: tx resume fail!\n", __func__);
ctl.cmd = PHL_DATA_CTL_SW_RX_RESUME;
if (phl_data_ctrler(phl_info, &ctl, NULL) != RTW_PHL_STATUS_SUCCESS)
PHL_WARN("%s: rx resume fail!\n", __func__);
return RTW_PHL_STATUS_SUCCESS;
error_hal_start:
PHL_ERR("error_hal_start\n");
return phl_status;
}
#define MAX_RF_OFF_STOP_TRX_TIME 100 /* ms */
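/*
 * rtw_phl_rf_off - mirror of rtw_phl_rf_on: disable interrupts, pause the
 * SW TX/RX paths, suspend the roles, stop the HAL and finally reset the
 * SW TX/RX paths through the data controller.
 */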
enum rtw_phl_status rtw_phl_rf_off(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *evt_ops = &phl_info->phl_com->evt_ops;
#endif /* CONFIG_SYNC_INTERRUPT */
struct phl_data_ctl_t ctl = {0};
#ifdef CONFIG_SYNC_INTERRUPT
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), false);
#else
rtw_hal_disable_interrupt(phl_info->phl_com, phl_info->hal);
#endif /* CONFIG_SYNC_INTERRUPT */
ctl.id = PHL_MDL_POWER_MGNT;
ctl.cmd = PHL_DATA_CTL_SW_TX_PAUSE;
if (phl_data_ctrler(phl_info, &ctl, NULL) != RTW_PHL_STATUS_SUCCESS)
PHL_WARN("%s: tx pause fail!\n", __func__);
ctl.cmd = PHL_DATA_CTL_SW_RX_PAUSE;
if (phl_data_ctrler(phl_info, &ctl, NULL) != RTW_PHL_STATUS_SUCCESS)
PHL_WARN("%s: rx pause fail!\n", __func__);
phl_role_suspend(phl_info);
rtw_hal_stop(phl_info->phl_com, phl_info->hal);
ctl.cmd = PHL_DATA_CTL_SW_TX_RESET;
if (phl_data_ctrler(phl_info, &ctl, NULL) != RTW_PHL_STATUS_SUCCESS)
PHL_WARN("%s: tx reset fail!\n", __func__);
ctl.cmd = PHL_DATA_CTL_SW_RX_RESET;
if (phl_data_ctrler(phl_info, &ctl, NULL) != RTW_PHL_STATUS_SUCCESS)
PHL_WARN("%s: rx reset fail!\n", __func__);
return RTW_PHL_STATUS_SUCCESS;
}
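/*
 * rtw_phl_suspend - suspend entry point: make sure the PHY is on, then either
 * enter WoWLAN via phl_wow_start() or suspend the roles and stop PHL.
 */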
enum rtw_phl_status rtw_phl_suspend(void *phl, struct rtw_phl_stainfo_t *sta, u8 wow_en)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
PHL_INFO("%s enter with wow_en(%d)\n.", __func__, wow_en);
#ifdef CONFIG_WOWLAN
pstatus = _phl_cmd_send_msg_phy_on(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_ERR("[wow] _phl_cmd_send_msg_phy_on fail!\n");
wow_en = false;
}
if (wow_en) {
pstatus = phl_wow_start(phl_info, sta);
} else {
phl_cmd_role_suspend(phl_info);
rtw_phl_stop(phl);
}
#else
PHL_INFO("%s enter with wow_en(%d)\n.", __func__, wow_en);
phl_cmd_role_suspend(phl_info);
rtw_phl_stop(phl);
#endif
FUNCOUT_WSTS(pstatus);
return pstatus;
}
enum rtw_phl_status rtw_phl_resume(void *phl, struct rtw_phl_stainfo_t *sta, u8 *hw_reinit)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
#ifdef CONFIG_WOWLAN
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
#endif
/**
* Since some platforms require a fast resume, "rtw_phl_resume" must finish
* as quickly as possible. To achieve this, the PS module is prevented from
* entering any power-saving mechanism, and I/O operations are performed
* directly instead of being issued as commands to the cmd dispatcher.
* Concretely, the PS module will not enter power saving while the device
* state "RTW_DEV_RESUMING" is set; while that state is set, I/O operations
* should check whether the current power state allows I/O and, if it does,
* perform the I/O directly without going through the cmd dispatcher.
* This flow is only suitable for "rtw_phl_resume", because the core layer
* does not perform any other tasks while calling rtw_phl_resume, which keeps
* the flow simple enough.
*/
PHL_INFO("%s enter...\n.", __func__);
SET_STATUS_FLAG(phl_info->phl_com->dev_state, RTW_DEV_RESUMING);
#ifdef CONFIG_WOWLAN
if (wow_info->op_mode != RTW_WOW_OP_NONE) {
phl_wow_stop(phl_info, sta, hw_reinit);
} else {
pstatus = rtw_phl_start(phl);
#ifdef CONFIG_POWER_SAVE
if (phl_ps_get_cur_pwr_lvl(phl_info) == PS_PWR_LVL_PWRON)
#endif
phl_role_recover(phl_info);
*hw_reinit = true;
}
#if defined(RTW_WKARD_WOW_L2_PWR) && defined(CONFIG_PCI_HCI)
rtw_hal_set_l2_leave(phl_info->hal);
#endif
phl_record_wow_stat(wow_info);
phl_reset_wow_info(wow_info);
#else
pstatus = rtw_phl_start(phl);
#ifdef CONFIG_POWER_SAVE
if (phl_ps_get_cur_pwr_lvl(phl_info) == PS_PWR_LVL_PWRON)
#endif
phl_role_recover(phl_info);
*hw_reinit = true;
#endif
CLEAR_STATUS_FLAG(phl_info->phl_com->dev_state, RTW_DEV_RESUMING);
PHL_INFO("%s exit with hw_reinit %d.\n.", __func__, *hw_reinit);
return pstatus;
}
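/*
 * rtw_phl_reset - stop the HAL, reset and resume the HCI TX/RX paths, then
 * start the HAL again. Returns success only if PHL init had completed.
 */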
enum rtw_phl_status rtw_phl_reset(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
if(rtw_phl_is_init_completed(phl_info))
phl_status = RTW_PHL_STATUS_SUCCESS;
rtw_hal_stop(phl_info->phl_com, phl_info->hal);
ops->trx_reset(phl_info, PHL_CTRL_TX|PHL_CTRL_RX);
ops->trx_resume(phl_info, PHL_CTRL_TX|PHL_CTRL_RX);
rtw_hal_start(phl_info->phl_com, phl_info->hal);
/* Leave power save */
/* scan abort */
/* STA disconnect/stop AP/Stop p2p function */
return phl_status;
}
enum rtw_phl_status rtw_phl_restart(void *phl)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
phl_status = RTW_PHL_STATUS_SUCCESS;
return phl_status;
}
/******************* IO APIs *******************/
u8 rtw_phl_read8(void *phl, u32 addr)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_read8(phl_info->hal, addr);
}
u16 rtw_phl_read16(void *phl, u32 addr)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_read16(phl_info->hal, addr);
}
u32 rtw_phl_read32(void *phl, u32 addr)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_read32(phl_info->hal, addr);
}
void rtw_phl_write8(void *phl, u32 addr, u8 val)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_write8(phl_info->hal, addr, val);
}
void rtw_phl_write16(void *phl, u32 addr, u16 val)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_write16(phl_info->hal, addr, val);
}
void rtw_phl_write32(void *phl, u32 addr, u32 val)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_write32(phl_info->hal, addr, val);
}
u32 rtw_phl_read_macreg(void *phl, u32 offset, u32 bit_mask)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_read_macreg(phl_info->hal, offset, bit_mask);
}
void rtw_phl_write_macreg(void *phl,
u32 offset, u32 bit_mask, u32 data)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_write_macreg(phl_info->hal, offset, bit_mask, data);
}
u32 rtw_phl_read_bbreg(void *phl, u32 offset, u32 bit_mask)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_read_bbreg(phl_info->hal, offset, bit_mask);
}
void rtw_phl_write_bbreg(void *phl,
u32 offset, u32 bit_mask, u32 data)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_write_bbreg(phl_info->hal, offset, bit_mask, data);
}
u32 rtw_phl_read_rfreg(void *phl,
enum rf_path path, u32 offset, u32 bit_mask)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_read_rfreg(phl_info->hal, path, offset, bit_mask);
}
void rtw_phl_write_rfreg(void *phl,
enum rf_path path, u32 offset, u32 bit_mask, u32 data)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_write_rfreg(phl_info->hal, path, offset, bit_mask, data);
}
void rtw_phl_restore_interrupt(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_restore_interrupt(phl_info->phl_com, phl_info->hal);
}
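/*
 * rtw_phl_interrupt_handler - read the HAL interrupt status and dispatch each
 * bit (beacon, RX, TX, cmd, halt C2H / SER check, FW watchdog timeout) to its
 * handler, then schedule the TX handler. With CONFIG_SYNC_INTERRUPT the
 * interrupt state is restored before returning.
 */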
enum rtw_phl_status rtw_phl_interrupt_handler(void *phl)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
u32 int_hdler_msk = 0x0;
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
#endif /* CONFIG_SYNC_INTERRUPT */
int_hdler_msk = rtw_hal_interrupt_handler(phl_info->hal);
if (!int_hdler_msk) {
PHL_WARN("%s : 0x%x\n", __func__, int_hdler_msk);
phl_status = RTW_PHL_STATUS_FAILURE;
goto end;
}
PHL_DBG("%s : 0x%x\n", __func__, int_hdler_msk);
/* beacon interrupt */
if (int_hdler_msk & BIT0)
;/* todo */
/* rx interrupt */
if (int_hdler_msk & BIT1) {
#if defined(CONFIG_SDIO_HCI) && defined(CONFIG_PHL_SDIO_READ_RXFF_IN_INT)
phl_info->hci_trx_ops->recv_rxfifo(phl);
#else
phl_status = rtw_phl_start_rx_process(phl);
#endif
#if defined(CONFIG_PCI_HCI) && !defined(CONFIG_DYNAMIC_RX_BUF)
/* phl_status = hci_trx_ops->recycle_busy_wd(phl); */
#endif
}
/* tx interrupt */
if (int_hdler_msk & BIT2)
;
/* cmd interrupt */
if (int_hdler_msk & BIT3)
;/* todo */
/* halt c2h interrupt */
if (int_hdler_msk & BIT4)
phl_status = phl_ser_send_msg(phl, RTW_PHL_SER_EVENT_CHK);
/* halt c2h interrupt */
if (int_hdler_msk & BIT5)
phl_status = phl_fw_watchdog_timeout_notify(phl);
/* halt c2h interrupt - send msg to SER FSM to check ser event */
if (int_hdler_msk & BIT6)
phl_status = phl_ser_send_msg(phl, RTW_PHL_SER_EVENT_CHK);
if (phl_status != RTW_PHL_STATUS_SUCCESS)
PHL_INFO("rtw_phl_interrupt_handler fail !!\n");
/* schedule tx process */
phl_status = phl_schedule_handler(phl_info->phl_com, &phl_info->phl_tx_handler);
end:
#ifdef CONFIG_SYNC_INTERRUPT
ops->interrupt_restore(phl_to_drvpriv(phl_info), false);
#endif
return phl_status;
}
void rtw_phl_enable_interrupt(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_enable_interrupt(phl_info->phl_com, phl_info->hal);
}
void rtw_phl_disable_interrupt(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_disable_interrupt(phl_info->phl_com, phl_info->hal);
}
bool rtw_phl_recognize_interrupt(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
return rtw_hal_recognize_interrupt(phl_info->hal);
}
void rtw_phl_clear_interrupt(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_clear_interrupt(phl_info->hal);
}
enum rtw_phl_status rtw_phl_msg_hub_register_recver(void* phl,
struct phl_msg_receiver* ctx, enum phl_msg_recver_layer layer)
{
return phl_msg_hub_register_recver(phl, ctx, layer);
}
enum rtw_phl_status rtw_phl_msg_hub_update_recver_mask(void* phl,
enum phl_msg_recver_layer layer, u8* mdl_id, u32 len, u8 clr)
{
return phl_msg_hub_update_recver_mask(phl, layer, mdl_id, len, clr);
}
enum rtw_phl_status rtw_phl_msg_hub_deregister_recver(void* phl,
enum phl_msg_recver_layer layer)
{
return phl_msg_hub_deregister_recver(phl, layer);
}
enum rtw_phl_status rtw_phl_msg_hub_send(void* phl,
struct phl_msg_attribute* attr, struct phl_msg* msg)
{
return phl_msg_hub_send((struct phl_info_t*)phl, attr, msg);
}
#ifdef PHL_PLATFORM_LINUX
void rtw_phl_mac_reg_dump(void *sel, void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_mac_reg_dump(sel, phl_info->hal);
}
void rtw_phl_bb_reg_dump(void *sel, void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_bb_reg_dump(sel, phl_info->hal);
}
void rtw_phl_bb_reg_dump_ex(void *sel, void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_bb_reg_dump_ex(sel, phl_info->hal);
}
void rtw_phl_rf_reg_dump(void *sel, void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
rtw_hal_rf_reg_dump(sel, phl_info->hal);
}
#endif
/**
* rtw_phl_get_sec_cam() - get the security CAM raw data from HW
* @phl: struct phl_info_t *
* @num: how many CAM entries you want to dump, starting from the first one
* @buf: pointer to the buffer which stores the content read from HW.
*       If buf is NULL, the console is used as the debug path.
* @size: size of the allocated memory for @buf.
*        The size should be @num * the size of one security CAM entry (0x20).
*
* Return true when the function works successfully; otherwise, return false.
*/
bool rtw_phl_get_sec_cam(void *phl, u16 num, u8 *buf, u16 size)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_hal_status ret = RTW_HAL_STATUS_SUCCESS;
ret = rtw_hal_get_sec_cam(phl_info->hal, num, buf, size);
if (ret != RTW_HAL_STATUS_SUCCESS)
return false;
return true;
}
/**
* rtw_phl_get_addr_cam() - get the address CAM raw data from HW
* @phl: struct phl_info_t *
* @num: how many CAM entries you want to dump, starting from the first one
* @buf: pointer to the buffer which stores the content read from HW.
*       If buf is NULL, the console is used as the debug path.
* @size: size of the allocated memory for @buf.
*        The size should be @num * the size of one address CAM entry (0x40).
*
* Return true when the function works successfully; otherwise, return false.
*/
bool rtw_phl_get_addr_cam(void *phl, u16 num, u8 *buf, u16 size)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_hal_status ret = RTW_HAL_STATUS_SUCCESS;
ret = rtw_hal_get_addr_cam(phl_info->hal, num, buf, size);
if (ret != RTW_HAL_STATUS_SUCCESS)
return false;
return true;
}
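/*
 * rtw_phl_mac_dbg_status_dump - val[0]/val[1] carry the SS debug words and
 * the bits of *en select the dump blocks: BIT0 ss_dbg, BIT1 dle_dbg,
 * BIT2 dmac_dbg, BIT3 cmac_dbg, BIT4 mac_dbg_port, BIT5 plersvd_dbg,
 * BIT6 tx_flow_dbg.
 */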
void rtw_phl_mac_dbg_status_dump(void *phl, u32 *val, u8 *en)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct hal_mac_dbg_dump_cfg cfg = {0};
cfg.ss_dbg_0 = val[0];
cfg.ss_dbg_1 = val[1];
cfg.ss_dbg = (*en & BIT0);
cfg.dle_dbg = (*en & BIT1) >> 1;
cfg.dmac_dbg = (*en & BIT2) >> 2;
cfg.cmac_dbg = (*en & BIT3) >> 3;
cfg.mac_dbg_port = (*en & BIT4) >> 4;
cfg.plersvd_dbg = (*en & BIT5) >> 5;
cfg.tx_flow_dbg = (*en & BIT6) >> 6;
rtw_hal_dbg_status_dump(phl_info->hal, &cfg);
}
enum rtw_phl_status rtw_phl_get_mac_addr_efuse(void* phl, u8 *addr)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *d = phl_to_drvpriv(phl_info);
u8 addr_efuse[MAC_ADDRESS_LENGTH] = {0};
hstatus = rtw_hal_get_efuse_info(phl_info->hal,
EFUSE_INFO_MAC_ADDR,
(void *)addr_efuse,
MAC_ADDRESS_LENGTH);
if (is_broadcast_mac_addr(addr_efuse)) {
PHL_INFO("[WARNING] MAC Address from EFUSE is FF:FF:FF:FF:FF:FF\n");
hstatus = RTW_HAL_STATUS_FAILURE;
}
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
pstatus = RTW_PHL_STATUS_FAILURE;
} else {
_os_mem_cpy(d, addr, addr_efuse, MAC_ADDRESS_LENGTH);
PHL_INFO("%s : 0x%2x - 0x%2x - 0x%2x - 0x%2x - 0x%2x - 0x%2x\n",
__func__, addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5]);
}
return pstatus;
}
enum rtw_phl_status
rtw_phl_cfg_trx_path(void* phl, enum rf_path tx, u8 tx_nss,
enum rf_path rx, u8 rx_nss)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
hstatus = rtw_hal_cfg_trx_path(phl_info->hal, tx, tx_nss, rx, rx_nss);
if (RTW_HAL_STATUS_SUCCESS != hstatus)
pstatus = RTW_PHL_STATUS_FAILURE;
return pstatus;
}
void rtw_phl_reset_stat_ma_rssi(struct rtw_phl_com_t *phl_com)
{
u8 i = 0, j = 0;
PHL_INFO("--> %s\n", __func__);
do{
if (NULL == phl_com)
break;
_os_spinlock(phl_com->drv_priv,
&(phl_com->rssi_stat.lock), _bh, NULL);
for (i = 0; i < RTW_RSSI_TYPE_MAX; i++) {
phl_com->rssi_stat.ma_rssi_ele_idx[i] = 0;
phl_com->rssi_stat.ma_rssi_ele_cnt[i] = 0;
phl_com->rssi_stat.ma_rssi_ele_sum[i] = 0;
phl_com->rssi_stat.ma_rssi[i] = 0;
for (j = 0; j < PHL_RSSI_MAVG_NUM; j++)
phl_com->rssi_stat.ma_rssi_ele[i][j] = 0;
}
_os_spinunlock(phl_com->drv_priv,
&(phl_com->rssi_stat.lock), _bh, NULL);
} while (0);
PHL_INFO("<-- %s\n", __func__);
}
u8
rtw_phl_get_ma_rssi(struct rtw_phl_com_t *phl_com,
enum rtw_rssi_type rssi_type)
{
u8 ret = 0;
if (NULL == phl_com)
return ret;
_os_spinlock(phl_com->drv_priv,
&(phl_com->rssi_stat.lock), _bh, NULL);
ret = phl_com->rssi_stat.ma_rssi[rssi_type];
_os_spinunlock(phl_com->drv_priv,
&(phl_com->rssi_stat.lock), _bh, NULL);
return ret;
}
#ifdef RTW_WKARD_DYNAMIC_BFEE_CAP
enum rtw_phl_status
rtw_phl_bfee_ctrl(void *phl, struct rtw_wifi_role_t *wrole, bool ctrl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
if (RTW_HAL_STATUS_SUCCESS !=
rtw_hal_bf_bfee_ctrl(phl_info->hal, wrole->hw_band, ctrl)) {
pstatus = RTW_PHL_STATUS_FAILURE;
}
return pstatus;
}
#endif
u8
rtw_phl_get_sta_mgnt_rssi(struct rtw_phl_stainfo_t *psta)
{
u8 ret = PHL_MAX_RSSI;
if (psta != NULL) {
ret = psta->hal_sta->rssi_stat.ma_rssi_mgnt;
}
return ret;
}
| 2301_81045437/rtl8852be | phl/phl_init.c | C | agpl-3.0 | 67,674 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_LED_C_
#include "phl_headers.h"
#define PHL_LED_INTERVALS_ARR_LEN_MAX 4
struct phl_led_event_args_t {
enum rtw_led_state state_condition;
struct rtw_led_action_args_t *action_args_arr;
u8 action_args_arr_len;
u32 toggle_delay_unit;
struct phl_led_event_args_t *next;
};
struct phl_led_timer_args_t {
struct phl_info_t *phl_info;
_os_timer timer;
u32 delay_unit;
bool timer_alive;
u32 led_manage_mask;
};
struct phl_led_info_t {
enum rtw_led_ctrl_mode ctrl_mode;
enum rtw_led_ctrl_mode reg_ctrl_mode;
enum rtw_led_opt curr_opt;
const struct rtw_led_toggle_args_t *toggle_args;
struct phl_led_timer_args_t *toggle_timer_args;
u32 toggle_interval_counter;
u32 toggle_start_delay_counter;
bool toggle_start_delay_over;
u32 toggle_loop_counter;
u8 toggle_curr_interval_idx;
};
struct phl_led_ctrl_t {
struct phl_led_info_t led_info_arr[RTW_LED_ID_LENGTH];
struct phl_led_event_args_t *event_args_list_arr[RTW_LED_EVENT_LENGTH];
struct rtw_led_intervals_t intervals_arr[PHL_LED_INTERVALS_ARR_LEN_MAX];
enum rtw_led_state state;
};
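/* Cancel and free the shared toggle timer together with its argument block. */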
static void _phl_led_timer_release(struct phl_led_timer_args_t *timer_args)
{
void *drv_priv = phl_to_drvpriv(timer_args->phl_info);
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: led_manage_mask == 0x%x\n",
__func__, timer_args->led_manage_mask);
_os_cancel_timer(drv_priv, &(timer_args->timer));
_os_release_timer(drv_priv, &(timer_args->timer));
_os_mem_free(drv_priv, timer_args, sizeof(struct phl_led_timer_args_t));
}
static void _phl_led_remove_from_timer(struct phl_led_info_t *led_info,
enum rtw_led_id led_id)
{
u32 *mask = NULL;
if (led_info->toggle_timer_args != NULL) {
mask = &(led_info->toggle_timer_args->led_manage_mask);
*mask &= ~BIT(led_id);
if (*mask == 0)
led_info->toggle_timer_args->timer_alive = false;
led_info->toggle_timer_args = NULL;
led_info->toggle_args = NULL;
}
}
static void
_phl_led_all_remove_from_timer(struct phl_led_timer_args_t *timer_args)
{
enum rtw_led_id led_id = 0;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)(timer_args->phl_info->led_ctrl);
for (led_id = 0; led_id < RTW_LED_ID_LENGTH; led_id++) {
if ((timer_args->led_manage_mask & BIT(led_id)) == 0)
continue;
_phl_led_remove_from_timer(&(led_ctrl->led_info_arr[led_id]),
led_id);
}
}
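/*
 * Timer callback: post a MSG_EVT_LED_TICK message carrying the timer args to
 * the dispatcher; the actual toggling is done in the LED module msg handler.
 */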
static void _phl_led_timer_cb(void *args)
{
struct phl_led_timer_args_t *timer_args =
(struct phl_led_timer_args_t *) args;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_LED);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_LED_TICK);
msg.band_idx = HW_BAND_0;
msg.inbuf = (u8 *)(timer_args);
msg.inlen = sizeof(struct phl_led_timer_args_t);
phl_disp_eng_send_msg(timer_args->phl_info, &msg, &attr, NULL);
}
static enum rtw_phl_status _phl_led_ctrl_write_opt(void *hal,
enum rtw_led_id led_id,
enum rtw_led_opt *curr_opt,
enum rtw_led_opt opt)
{
if (opt >= RTW_LED_OPT_UNKNOWN) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: unknown opt (%d)\n",
__func__, opt);
return RTW_PHL_STATUS_FAILURE;
}
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_led_control(hal, led_id, opt))
return RTW_PHL_STATUS_FAILURE;
*curr_opt = opt;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_led_ctrl_start_delay_hdlr(void *hal, struct phl_led_info_t *led_info,
enum rtw_led_id led_id)
{
if (led_info->toggle_start_delay_counter >=
led_info->toggle_args->start_delay) {
led_info->toggle_start_delay_over = true;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: start delay is over\n",
__func__);
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_write_opt(hal, led_id, &(led_info->curr_opt),
led_info->toggle_args->start_opt))
return RTW_PHL_STATUS_FAILURE;
return RTW_PHL_STATUS_SUCCESS;
}
(led_info->toggle_start_delay_counter)++;
return RTW_PHL_STATUS_SUCCESS;
}
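/*
 * Advance the interval counter by one tick and toggle the LED level when the
 * current interval expires, moving to the next interval and counting loops
 * when the interval array wraps around.
 */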
static enum rtw_phl_status
_phl_led_ctrl_interval_hdlr(void *hal, struct phl_led_info_t *led_info,
enum rtw_led_id led_id,
struct rtw_led_intervals_t *intervals)
{
u32 interval = 0;
enum rtw_led_opt opt = RTW_LED_OPT_UNKNOWN;
if (intervals == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: intervals == NULL\n",
__func__);
return RTW_PHL_STATUS_FAILURE;
}
if (intervals->interval_arr == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: interval_arr == NULL\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
if (led_info->toggle_curr_interval_idx >= intervals->len) {
PHL_TRACE(
COMP_PHL_LED, _PHL_INFO_,
"%s: curr_interval_idx ( %d ) >= intervals' len ( %d )\n",
__func__, led_info->toggle_curr_interval_idx,
intervals->len);
return RTW_PHL_STATUS_FAILURE;
}
interval = intervals->interval_arr[led_info->toggle_curr_interval_idx];
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: curr_interval_idx == %d, interval == %d, "
"interval_counter == %d\n",
__func__, led_info->toggle_curr_interval_idx, interval,
led_info->toggle_interval_counter);
if (interval > ++(led_info->toggle_interval_counter))
/* it is not time to toggle */
return RTW_PHL_STATUS_SUCCESS;
led_info->toggle_interval_counter = 0;
/* set curr_interval_idx to next */
if (++(led_info->toggle_curr_interval_idx) >= intervals->len) {
led_info->toggle_curr_interval_idx = 0;
if (led_info->toggle_args->loop > 0)
(led_info->toggle_loop_counter)++;
}
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: toggle led_id: %d\n", __func__,
led_id);
if (led_info->curr_opt == RTW_LED_OPT_LOW)
opt = RTW_LED_OPT_HIGH;
else if (led_info->curr_opt == RTW_LED_OPT_HIGH)
opt = RTW_LED_OPT_LOW;
else {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: incorrect curr_opt ( %d ). led_id: %d\n",
__func__, led_info->curr_opt, led_id);
return RTW_PHL_STATUS_FAILURE;
}
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_write_opt(hal, led_id, &(led_info->curr_opt), opt))
return RTW_PHL_STATUS_FAILURE;
return RTW_PHL_STATUS_SUCCESS;
}
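/*
 * Per-tick handler for every LED managed by this timer: run the start-delay
 * handling first, then the interval handling, detach LEDs whose loop count
 * has been reached, and finally re-arm the timer or release it when no LED
 * is left.
 */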
static enum rtw_phl_status
_phl_led_ctrl_toggle_hdlr(struct phl_led_timer_args_t *timer_args)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
void *drv_priv = phl_to_drvpriv(timer_args->phl_info);
enum rtw_led_id led_id = 0;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)(timer_args->phl_info->led_ctrl);
struct phl_led_info_t *led_info = NULL;
u8 intervals_idx = 0;
for (led_id = 0; led_id < RTW_LED_ID_LENGTH; led_id++) {
if ((timer_args->led_manage_mask & BIT(led_id)) == 0)
continue;
led_info = &(led_ctrl->led_info_arr[led_id]);
/* start_delay handling */
if (!led_info->toggle_start_delay_over) {
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_start_delay_hdlr(
timer_args->phl_info->hal, led_info, led_id)) {
status = RTW_PHL_STATUS_FAILURE;
}
continue;
}
/* start_delay is over, handle intervals */
intervals_idx = led_info->toggle_args->intervals_idx;
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_interval_hdlr(
timer_args->phl_info->hal, led_info, led_id,
&(led_ctrl->intervals_arr[intervals_idx]))) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: intervals handling failed. led_id: %d\n",
__func__, led_id);
status = RTW_PHL_STATUS_FAILURE;
}
if (led_info->toggle_args->loop > 0 &&
led_info->toggle_args->loop ==
led_info->toggle_loop_counter) {
_phl_led_remove_from_timer(led_info, led_id);
}
}
if (timer_args->timer_alive)
_os_set_timer(drv_priv, &(timer_args->timer),
timer_args->delay_unit);
else
_phl_led_timer_release(timer_args);
return status;
}
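/*
 * Apply a single LED action: pick the matching control mode, then drive the
 * LED low/high, hand it over to HW TRX mode, or (for TOGGLE) install the
 * toggle arguments and attach the LED to a shared toggle timer, allocating
 * the timer argument block on first use.
 */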
static enum rtw_phl_status
_phl_led_ctrl_action_hdlr(struct phl_info_t *phl_info, enum rtw_led_id led_id,
enum rtw_led_action action,
struct rtw_led_toggle_args_t *toggle_args,
struct phl_led_timer_args_t **timer_args_ptr)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)(phl_info->led_ctrl);
struct phl_led_info_t *led_info = &(led_ctrl->led_info_arr[led_id]);
enum rtw_led_ctrl_mode target_ctrl_mode;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: led_id == %d, action == 0X%X\n", __func__, led_id,
action);
/* Set ctrl mode*/
switch (action) {
case RTW_LED_ACTION_LOW:
case RTW_LED_ACTION_HIGH:
case RTW_LED_ACTION_TOGGLE:
target_ctrl_mode = led_info->reg_ctrl_mode;
break;
case RTW_LED_ACTION_HW_TRX:
target_ctrl_mode = RTW_LED_CTRL_HW_TRX_MODE;
break;
default:
target_ctrl_mode = led_info->ctrl_mode;
break;
}
if(led_info->ctrl_mode != target_ctrl_mode){
if (rtw_hal_led_set_ctrl_mode(phl_info->hal, led_id,
target_ctrl_mode)){
status = RTW_PHL_STATUS_FAILURE;
return status;
}
led_info->ctrl_mode = target_ctrl_mode;
}
/* Sw action */
switch (action) {
case RTW_LED_ACTION_LOW:
if(led_info->ctrl_mode != RTW_LED_CTRL_SW_PP_MODE &&
led_info->ctrl_mode != RTW_LED_CTRL_SW_OD_MODE)
break;
_phl_led_remove_from_timer(led_info, led_id);
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_write_opt(phl_info->hal, led_id,
&(led_info->curr_opt),
RTW_LED_OPT_LOW))
status = RTW_PHL_STATUS_FAILURE;
break;
case RTW_LED_ACTION_HIGH:
if(led_info->ctrl_mode != RTW_LED_CTRL_SW_PP_MODE &&
led_info->ctrl_mode != RTW_LED_CTRL_SW_OD_MODE)
break;
_phl_led_remove_from_timer(led_info, led_id);
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_write_opt(phl_info->hal, led_id,
&(led_info->curr_opt),
RTW_LED_OPT_HIGH))
status = RTW_PHL_STATUS_FAILURE;
break;
case RTW_LED_ACTION_HW_TRX:
_phl_led_remove_from_timer(led_info, led_id);
break;
case RTW_LED_ACTION_TOGGLE:
if(led_info->ctrl_mode != RTW_LED_CTRL_SW_PP_MODE &&
led_info->ctrl_mode != RTW_LED_CTRL_SW_OD_MODE)
break;
_phl_led_remove_from_timer(led_info, led_id);
led_info->toggle_args = toggle_args;
led_info->toggle_interval_counter = 0;
led_info->toggle_start_delay_counter = toggle_args->start_delay;
led_info->toggle_start_delay_over = false;
led_info->toggle_loop_counter = 0;
led_info->toggle_curr_interval_idx = 0;
if (*timer_args_ptr == NULL) {
if (NULL ==
(*timer_args_ptr = _os_mem_alloc(
drv_priv,
sizeof(struct phl_led_timer_args_t)))) {
PHL_ERR("%s: alloc buffer failed!\n", __func__);
status = RTW_PHL_STATUS_FAILURE;
break;
}
(*timer_args_ptr)->phl_info = phl_info;
_os_init_timer(drv_priv, &((*timer_args_ptr)->timer),
_phl_led_timer_cb, *timer_args_ptr,
"phl_led_timer");
(*timer_args_ptr)->led_manage_mask = 0;
(*timer_args_ptr)->timer_alive = true;
(*timer_args_ptr)->delay_unit = 0;
}
(*timer_args_ptr)->led_manage_mask |= BIT(led_id);
led_info->toggle_timer_args = *timer_args_ptr;
break;
default:
status = RTW_PHL_STATUS_FAILURE;
break;
}
return status;
}
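/*
 * Handle one LED event: update the SW RF state if needed, then walk the
 * event's action-args list, run every action set whose state condition
 * matches, and kick the toggle handler when a toggle timer was set up.
 */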
static enum rtw_phl_status _phl_led_ctrl_event_hdlr(struct phl_info_t *phl_info,
enum rtw_led_event event)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_led_event_args_t *event_args = NULL;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)(phl_info->led_ctrl);
u8 args_idx;
enum rtw_led_id led_id;
struct phl_led_info_t *led_info = NULL;
struct rtw_led_action_args_t *action_args = NULL;
struct phl_led_timer_args_t *timer_args = NULL;
if(event >= RTW_LED_EVENT_LENGTH){
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: invalid event(0X%X) >= RTW_LED_EVENT_LENGTH(0X%X).\n",
__func__, event, RTW_LED_EVENT_LENGTH);
return RTW_PHL_STATUS_FAILURE;
}
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: event == 0X%X\n", __func__,
event);
/* set state */
switch (event) {
case RTW_LED_EVENT_SW_RF_ON:
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: set state sw rf on\n",
__func__);
led_ctrl->state |= RTW_LED_STATE_SW_RF_ON;
break;
case RTW_LED_EVENT_SW_RF_OFF:
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: set state sw rf off\n",
__func__);
led_ctrl->state &= ~RTW_LED_STATE_SW_RF_ON;
break;
default:
break;
}
/* handle event */
event_args =
led_ctrl->event_args_list_arr[event]; /* event_args = list head */
for (; event_args != NULL; event_args = event_args->next) {
if (!(event_args->state_condition &
(led_ctrl->state | RTW_LED_STATE_IGNORE)))
continue;
timer_args = NULL;
for (args_idx = 0; args_idx < event_args->action_args_arr_len;
args_idx++) {
action_args = &(event_args->action_args_arr[args_idx]);
led_id = action_args->led_id;
led_info = &(led_ctrl->led_info_arr[led_id]);
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_action_hdlr(
phl_info, led_id, action_args->led_action,
&(action_args->toggle_args), &timer_args)) {
status = RTW_PHL_STATUS_FAILURE;
}
}
if (timer_args == NULL)
continue;
timer_args->delay_unit = event_args->toggle_delay_unit;
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_toggle_hdlr(timer_args))
status = RTW_PHL_STATUS_FAILURE;
}
return status;
}
static enum phl_mdl_ret_code _phl_led_module_init(void *phl, void *dispr,
void **priv)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_led_ctrl_t *led_ctrl = NULL;
enum rtw_led_id led_id = 0;
enum rtw_led_event event_id = 0;
struct phl_led_info_t *led_info = NULL;
struct rtw_led_intervals_t *intervals = NULL;
u8 intervals_idx = 0;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "===> _phl_led_module_init()\n");
if (NULL == (led_ctrl = _os_mem_alloc(drv_priv,
sizeof(struct phl_led_ctrl_t)))) {
PHL_ERR("%s: alloc buffer failed!\n", __func__);
phl_info->led_ctrl = NULL;
return MDL_RET_FAIL;
}
*priv = phl;
phl_info->led_ctrl = led_ctrl;
/* set default value in led_ctrl */
led_ctrl->state = 0;
for (led_id = 0; led_id < RTW_LED_ID_LENGTH; led_id++) {
led_info = &(led_ctrl->led_info_arr[led_id]);
led_info->ctrl_mode = RTW_LED_CTRL_NOT_SUPPORT;
led_info->reg_ctrl_mode = RTW_LED_CTRL_NOT_SUPPORT;
led_info->curr_opt = RTW_LED_OPT_UNKNOWN;
led_info->toggle_interval_counter = 0;
led_info->toggle_start_delay_counter = 0;
led_info->toggle_start_delay_over = false;
led_info->toggle_loop_counter = 0;
led_info->toggle_curr_interval_idx = 0;
led_info->toggle_timer_args = NULL;
led_info->toggle_args = NULL;
}
for (event_id = 0; event_id < RTW_LED_EVENT_LENGTH; event_id++) {
led_ctrl->event_args_list_arr[event_id] = NULL;
}
for (intervals_idx = 0; intervals_idx < PHL_LED_INTERVALS_ARR_LEN_MAX;
intervals_idx++) {
intervals = &(led_ctrl->intervals_arr[intervals_idx]);
intervals->interval_arr = NULL;
intervals->len = 0;
}
return MDL_RET_SUCCESS;
}
static void _phl_led_module_deinit(void *dispr, void *priv)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)(phl_info->led_ctrl);
enum rtw_led_event event = 0;
struct phl_led_event_args_t *event_args = NULL;
struct phl_led_event_args_t *event_args_next = NULL;
struct rtw_led_intervals_t *intervals = NULL;
u8 intervals_idx = 0;
enum rtw_led_id led_id = 0;
struct phl_led_info_t *led_info = NULL;
struct phl_led_timer_args_t *timer_args = NULL;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "===> _phl_led_module_deinit()\n");
if (led_ctrl == NULL)
return;
/* free event_args_list_arr */
for (event = 0; event < RTW_LED_EVENT_LENGTH; event++) {
event_args = led_ctrl->event_args_list_arr[event];
if (event_args == NULL)
continue;
while (event_args != NULL) {
event_args_next = event_args->next;
if (event_args->action_args_arr != NULL)
_os_mem_free(
drv_priv, event_args->action_args_arr,
event_args->action_args_arr_len *
sizeof(struct rtw_led_action_args_t));
_os_mem_free(drv_priv, event_args,
sizeof(struct phl_led_event_args_t));
event_args = event_args_next;
}
}
/* free intervals_arr */
for (intervals_idx = 0; intervals_idx < PHL_LED_INTERVALS_ARR_LEN_MAX;
intervals_idx++) {
intervals = &(led_ctrl->intervals_arr[intervals_idx]);
if (intervals->interval_arr == NULL)
continue;
_os_mem_free(drv_priv, intervals->interval_arr,
intervals->len * sizeof(u32));
}
/* free all timers */
for (led_id = 0; led_id < RTW_LED_ID_LENGTH; led_id++) {
led_info = &(led_ctrl->led_info_arr[led_id]);
if (led_info->toggle_timer_args == NULL)
continue;
timer_args = led_info->toggle_timer_args;
_phl_led_all_remove_from_timer(led_info->toggle_timer_args);
_phl_led_timer_release(timer_args);
}
_os_mem_free(drv_priv, led_ctrl, sizeof(struct phl_led_ctrl_t));
phl_info->led_ctrl = NULL;
}
static enum phl_mdl_ret_code _phl_led_module_start(void *dispr, void *priv)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)(phl_info->led_ctrl);
enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
enum rtw_led_id led_id = 0;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "===> _phl_led_module_start()\n");
if (led_ctrl == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: led_ctrl == NULL\n",
__func__);
return MDL_RET_FAIL;
}
for (led_id = 0; led_id < RTW_LED_ID_LENGTH; led_id++) {
if (RTW_HAL_STATUS_SUCCESS !=
rtw_hal_led_set_ctrl_mode(
phl_info->hal, led_id,
led_ctrl->led_info_arr[led_id].ctrl_mode))
ret = MDL_RET_FAIL;
}
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_event_hdlr(phl_info, RTW_LED_EVENT_PHL_START))
ret = MDL_RET_FAIL;
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_event_hdlr(phl_info, RTW_LED_EVENT_SW_RF_ON))
ret = MDL_RET_FAIL;
return ret;
}
static enum phl_mdl_ret_code _phl_led_module_stop(void *dispr, void *priv)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)(phl_info->led_ctrl);
enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "===> _phl_led_module_stop()\n");
if (led_ctrl == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: led_ctrl == NULL\n",
__func__);
return MDL_RET_FAIL;
}
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_event_hdlr(phl_info, RTW_LED_EVENT_SW_RF_OFF))
ret = MDL_RET_FAIL;
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_event_hdlr(phl_info, RTW_LED_EVENT_PHL_STOP))
ret = MDL_RET_FAIL;
return ret;
}
static enum phl_mdl_ret_code _phl_led_module_msg_hdlr(void *dispr, void *priv,
struct phl_msg *msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)(phl_info->led_ctrl);
enum phl_msg_evt_id msg_evt_id = MSG_EVT_ID_FIELD(msg->msg_id);
struct phl_led_timer_args_t *timer_args = NULL;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"===> _phl_led_module_msg_hdlr()\n");
if (IS_MSG_IN_PRE_PHASE(msg->msg_id))
return MDL_RET_SUCCESS;
if (MSG_MDL_ID_FIELD(msg->msg_id) != PHL_MDL_LED)
return MDL_RET_IGNORE;
if(IS_MSG_CANNOT_IO(msg->msg_id))
return MDL_RET_FAIL;
if (led_ctrl == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: led_ctrl == NULL\n",
__func__);
return MDL_RET_FAIL;
}
if(msg_evt_id < MSG_EVT_LED_EVT_START || msg_evt_id > MSG_EVT_LED_EVT_END){
if (msg_evt_id == MSG_EVT_LED_TICK) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: MSG_EVT_LED_TICK\n",
__func__);
timer_args = (struct phl_led_timer_args_t *)(msg->inbuf);
if (!timer_args->timer_alive) {
_phl_led_timer_release(timer_args);
return MDL_RET_SUCCESS;
}
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_toggle_hdlr(timer_args))
return MDL_RET_FAIL;
return MDL_RET_SUCCESS;
}
}
else {
if (RTW_PHL_STATUS_SUCCESS !=
_phl_led_ctrl_event_hdlr(phl_info, msg_evt_id - MSG_EVT_LED_EVT_START))
return MDL_RET_FAIL;
}
return MDL_RET_SUCCESS;
}
static enum phl_mdl_ret_code
_phl_led_module_set_info(void *dispr, void *priv,
struct phl_module_op_info *info)
{
return MDL_RET_SUCCESS;
}
static enum phl_mdl_ret_code
_phl_led_module_query_info(void *dispr, void *priv,
struct phl_module_op_info *info)
{
return MDL_RET_SUCCESS;
}
enum rtw_phl_status phl_register_led_module(struct phl_info_t *phl_info)
{
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct phl_bk_module_ops bk_ops;
bk_ops.init = _phl_led_module_init;
bk_ops.deinit = _phl_led_module_deinit;
bk_ops.start = _phl_led_module_start;
bk_ops.stop = _phl_led_module_stop;
bk_ops.msg_hdlr = _phl_led_module_msg_hdlr;
bk_ops.set_info = _phl_led_module_set_info;
bk_ops.query_info = _phl_led_module_query_info;
phl_status = phl_disp_eng_register_module(phl_info, HW_BAND_0,
PHL_MDL_LED, &bk_ops);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s register LED module in cmd disp failed\n",
__func__);
phl_status = RTW_PHL_STATUS_FAILURE;
}
return phl_status;
#else
return RTW_PHL_STATUS_FAILURE;
#endif
}
void rtw_phl_led_set_ctrl_mode(void *phl, enum rtw_led_id led_id,
enum rtw_led_ctrl_mode ctrl_mode)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)phl_info->led_ctrl;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"===> %s()\n", __func__);
if (led_ctrl == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: led_ctrl == NULL\n",
__func__);
return;
}
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: led_id == %d, ctrl_mode == %d\n", __func__, led_id,
ctrl_mode);
led_ctrl->led_info_arr[led_id].ctrl_mode = ctrl_mode;
led_ctrl->led_info_arr[led_id].reg_ctrl_mode = ctrl_mode;
}
void rtw_phl_led_set_toggle_intervals(void *phl, u8 intervals_idx,
u32 *interval_arr, u8 intervals_len)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)phl_info->led_ctrl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct rtw_led_intervals_t *intervals = NULL;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"===> rtw_phl_led_set_toggle_intervals()\n");
if (led_ctrl == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: led_ctrl == NULL\n",
__func__);
return;
}
if (intervals_idx >= PHL_LED_INTERVALS_ARR_LEN_MAX) {
PHL_TRACE(
COMP_PHL_LED, _PHL_INFO_,
"%s: intervals_idx >= PHL_LED_INTERVALS_ARR_LEN_MAX\n",
__func__);
return;
}
intervals = &(led_ctrl->intervals_arr[intervals_idx]);
/* check if the target intervals_arr has already been set */
if (intervals->interval_arr != NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: intervals_arr[%d] has already been set. "
"The new one is going to replace the old one!\n",
__func__, intervals_idx);
_os_mem_free(drv_priv, intervals->interval_arr,
intervals->len * sizeof(u32));
intervals->interval_arr = NULL;
intervals->len = 0;
}
if (NULL == (intervals->interval_arr = _os_mem_alloc(
drv_priv, intervals_len * sizeof(u32)))) {
PHL_ERR("%s: alloc buffer failed!\n", __func__);
return;
}
_os_mem_cpy(drv_priv, intervals->interval_arr, interval_arr,
intervals_len * sizeof(u32));
intervals->len = intervals_len;
return;
}
void rtw_phl_led_set_action(void *phl, enum rtw_led_event event,
enum rtw_led_state state_condition,
struct rtw_led_action_args_t *action_args_arr,
u8 action_args_arr_len, u32 toggle_delay_unit)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_led_ctrl_t *led_ctrl =
(struct phl_led_ctrl_t *)phl_info->led_ctrl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_led_event_args_t *event_args = NULL;
struct phl_led_event_args_t *event_args_prev = NULL;
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "===> %s()\n", __func__);
if (led_ctrl == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "%s: led_ctrl == NULL\n",
__func__);
return;
}
if (action_args_arr == NULL) {
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: input -- action_args_arr == NULL\n", __func__);
return;
}
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_,
"%s: event == %d, state_condition == %d\n", __func__, event,
state_condition);
event_args =
led_ctrl->event_args_list_arr[event]; /* event_args = list head */
while (event_args != NULL) {
if (event_args->state_condition == state_condition) {
PHL_TRACE(
COMP_PHL_LED, _PHL_INFO_,
"%s: the event_args_list of event 0x%x already has "
"a node with state_condition == 0x%x\n",
__func__, event, state_condition);
return;
}
event_args_prev = event_args;
event_args = event_args->next;
}
if (NULL == (event_args = _os_mem_alloc(
drv_priv, sizeof(struct phl_led_event_args_t)))) {
PHL_ERR("%s: alloc buffer failed!\n", __func__);
return;
}
if (NULL == (event_args->action_args_arr = _os_mem_alloc(
drv_priv, action_args_arr_len *
sizeof(struct rtw_led_action_args_t)))) {
PHL_ERR("%s: alloc buffer failed!\n", __func__);
_os_mem_free(drv_priv, event_args,
sizeof(struct phl_led_event_args_t));
return;
}
event_args->action_args_arr_len = action_args_arr_len;
event_args->state_condition = state_condition;
event_args->toggle_delay_unit = toggle_delay_unit;
_os_mem_cpy(drv_priv, event_args->action_args_arr, action_args_arr,
action_args_arr_len * sizeof(struct rtw_led_action_args_t));
event_args->next = NULL;
if (event_args_prev == NULL)
/* the event_args_list was empty */
led_ctrl->event_args_list_arr[event] = event_args;
else
event_args_prev->next = event_args;
}
void phl_led_control(struct phl_info_t *phl_info, enum rtw_led_event led_event)
{
#ifdef CONFIG_CMD_DISP
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
PHL_TRACE(COMP_PHL_LED, _PHL_INFO_, "===> rtw_phl_led_control()\n");
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_LED);
/*
* led_event here is passed via the msg_evt_id field (offset by
* MSG_EVT_LED_EVT_START) instead of being integrated with msg_evt_id
* directly, for the following reasons:
*
* (a) led_event is used for mapping LED events to LED actions, and the
* mapping can be configured in the core layer according to the
* customized LED table.
*
* (b) The LED module internally uses led_event as the index of the LED
* action arrays, so it would be inappropriate to directly replace
* led_event with msg_evt_id, which is not continuous and does not
* start from zero.
*
* (c) It is not worth using inbuf, with its overhead of dynamic
* allocation and a completion callback, just for a single number.
*/
SET_MSG_EVT_ID_FIELD(msg.msg_id, led_event + MSG_EVT_LED_EVT_START);
msg.band_idx = HW_BAND_0;
phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
#else
PHL_ERR("phl_fsm not support %s\n", __func__);
#endif
}
void rtw_phl_led_control(void *phl, enum rtw_led_event led_event)
{
phl_led_control((struct phl_info_t *)phl, led_event);
}
| 2301_81045437/rtl8852be | phl/phl_led.c | C | agpl-3.0 | 27,708 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_LED_H_
#define _PHL_LED_H_
enum rtw_phl_status phl_register_led_module(struct phl_info_t *phl_info);
void phl_led_control(struct phl_info_t *phl_info, enum rtw_led_event led_event);
#endif /*_PHL_LED_H_*/
| 2301_81045437/rtl8852be | phl/phl_led.h | C | agpl-3.0 | 870 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_LED_DEF_H_
#define _PHL_LED_DEF_H_
#define PHL_RADIO_ON_OFF_NOT_READY
enum rtw_led_ctrl_mode {
RTW_LED_CTRL_NOT_SUPPORT,
RTW_LED_CTRL_HW_TRX_MODE,
RTW_LED_CTRL_HW_TX_MODE,
RTW_LED_CTRL_HW_RX_MODE,
RTW_LED_CTRL_SW_PP_MODE,
RTW_LED_CTRL_SW_OD_MODE,
};
enum rtw_led_id { RTW_LED_ID_0, RTW_LED_ID_1, RTW_LED_ID_LENGTH };
/*
* led_event here is not integrated with msg_evt_id for the following reasons:
*
* (a) led_event is used for mapping LED events to LED actions, and the
* mapping can be configured in the core layer according to the
* customized LED table.
*
* (b) The LED module internally uses led_event as the index of the LED
* action arrays, so it would be inappropriate to directly replace
* led_event with msg_evt_id, which is not continuous and does not
* start from zero.
*/
enum rtw_led_event {
RTW_LED_EVENT_PHL_START,
RTW_LED_EVENT_PHL_STOP,
RTW_LED_EVENT_SW_RF_ON,
RTW_LED_EVENT_SW_RF_OFF,
RTW_LED_EVENT_SCAN_START,
RTW_LED_EVENT_LINK_START,
RTW_LED_EVENT_LINKED,
RTW_LED_EVENT_NO_LINK,
RTW_LED_EVENT_LINKED_CIPHER,
RTW_LED_EVENT_LINKED_NOCIPHER,
RTW_LED_EVENT_LINKED_24G,
RTW_LED_EVENT_LINKED_5G,
RTW_LED_EVENT_LENGTH
};
enum rtw_led_state {
RTW_LED_STATE_IGNORE = BIT0,
RTW_LED_STATE_SW_RF_ON = BIT1,
};
enum rtw_led_action {
RTW_LED_ACTION_LOW,
RTW_LED_ACTION_HIGH,
RTW_LED_ACTION_HW_TRX,
RTW_LED_ACTION_TOGGLE,
};
enum rtw_led_opt {
RTW_LED_OPT_LOW = 0,
RTW_LED_OPT_HIGH = 1,
RTW_LED_OPT_UNKNOWN = 2
};
struct rtw_led_toggle_args_t {
enum rtw_led_opt start_opt;
u32 start_delay;
u32 loop;
u8 intervals_idx;
};
struct rtw_led_action_args_t {
enum rtw_led_id led_id;
enum rtw_led_action led_action;
struct rtw_led_toggle_args_t toggle_args;
};
struct rtw_led_intervals_t {
u32 *interval_arr;
u8 len;
};
#endif /*_PHL_LED_DEF_H_*/
| 2301_81045437/rtl8852be | phl/phl_led_def.h | C | agpl-3.0 | 2,481 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_LIST_H_
#define _PHL_LIST_H_
/*
* Copied from include/linux/...
*/
#ifdef PHL_PLATFORM_WINDOWS
#define phl_container_of(_ptr, _type, _member) container_of(_ptr, _type, _member)
#define phl_list_for_loop(_pos, _type, _head, _member) \
list_for_each_entry(_pos, _type, _head, _member)
#define phl_list_for_loop_safe(_pos, _n, _type, _head, _member) \
list_for_each_entry_safe(_pos, _n, _type, _head, _member)
#elif defined(PHL_PLATFORM_LINUX)
#define phl_container_of(_ptr, _type, _member) container_of(_ptr, _type, _member)
#define phl_list_for_loop(_pos, _type, _head, _member) \
list_for_each_entry(_pos, _head, _member)
#define phl_list_for_loop_safe(_pos, _n, _type, _head, _member) \
list_for_each_entry_safe(_pos, _n, _head, _member)
#elif defined(PHL_PLATFORM_MACOS)
#define phl_container_of(_ptr, _type, _member) container_of(_ptr, _type, _member)
#define phl_list_for_loop(_pos, _type, _head, _member) \
list_for_each_entry(_pos, _type, _head, _member)
#define phl_list_for_loop_safe(_pos, _n, _type, _head, _member) \
list_for_each_entry_safe(_pos, _n, _type, _head, _member)
#else /* os free */
#undef offsetof
#define offsetof(_type, _member) ((size_t) &((_type *)0)->_member)
#define phl_container_of(_ptr, _type, _member) container_of(_ptr, _type, _member)
#define phl_list_for_loop(_pos, _type, _head, _member) \
list_for_each_entry(_pos, _type, _head, _member)
#define phl_list_for_loop_safe(_pos, _n, _type, _head, _member) \
list_for_each_entry_safe(_pos, _n, _type, _head, _member)
#endif
enum list_pos {
_first,
_tail
};
#ifndef PHL_PLATFORM_LINUX
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
*/
#define container_of(_ptr, _type, _member) \
((_type*)((char*)(_ptr) - (char*)&(((_type*)0)->_member)))
struct list_head {
struct list_head *next, *prev;
};
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
static __inline void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
list->prev = list;
}
/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
/**
* list_first_entry - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
/**
* list_last_entry - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note, that list is expected to be not empty.
*/
#define list_last_entry(ptr, type, member) \
list_entry((ptr)->prev, type, member)
/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @type: the type of the struct this is embedded in.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_for_each_entry(pos, type, head, member) \
for (pos = list_entry((head)->next, type, member); \
&pos->member != (head); \
pos = list_entry(pos->member.next, type, member))
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @type: the type of the struct this is embedded in
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_safe(pos, n, type, head, member) \
for (pos = list_entry((head)->next, type, member), \
n = list_entry(pos->member.next, type, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, type, member))
/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
static __inline int list_empty(const struct list_head *head)
{
return head->next == head;
}
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static __inline void __list_add(struct list_head *_new,
struct list_head *prev,
struct list_head *next)
{
next->prev = _new;
_new->next = next;
_new->prev = prev;
prev->next = _new;
}
/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static __inline void list_add(struct list_head *_new, struct list_head *head)
{
__list_add(_new, head, head->next);
}
/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static __inline void list_add_tail(struct list_head *_new, struct list_head *head)
{
__list_add(_new, head->prev, head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static __inline void __list_del(struct list_head *prev, struct list_head *next)
{
next->prev = prev;
prev->next = next;
}
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
static __inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->next = (struct list_head*)LIST_POISON1;
entry->prev = (struct list_head*)LIST_POISON2;
}
#endif
#endif /*_PHL_LIST_H_*/
| 2301_81045437/rtl8852be | phl/phl_list.h | C | agpl-3.0 | 7,019 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_MCC_C_
#include "phl_headers.h"
#ifdef CONFIG_MCC_SUPPORT
#include "phl_mcc.h"
void _mcc_dump_state(enum rtw_phl_mcc_state *state)
{
if (MCC_NONE == *state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_state(): MCC_NONE\n");
} else if (MCC_CFG_EN_INFO == *state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_state(): MCC_CFG_EN_INFO\n");
} else if (MCC_TRIGGER_FW_EN == *state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_state(): MCC_TRIGGER_FW_EN\n");
} else if (MCC_FW_EN_FAIL == *state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_state(): MCC_FW_EN_FAIL\n");
} else if (MCC_RUNING == *state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_state(): MCC_RUNING\n");
} else if (MCC_TRIGGER_FW_DIS == *state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_state(): MCC_TRIGGER_FW_DIS\n");
} else if (MCC_FW_DIS_FAIL == *state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_state(): MCC_FW_DIS_FAIL\n");
} else if (MCC_STOP == *state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_state(): MCC_STOP\n");
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_dump_state(): Undefined state(%d)\n",
*state);
}
}
void _mcc_dump_mode(enum rtw_phl_tdmra_wmode *mode)
{
if (RTW_PHL_TDMRA_AP_CLIENT_WMODE == *mode) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_mode(): RTW_PHL_TDMRA_AP_CLIENT_WMODE\n");
} else if (RTW_PHL_TDMRA_2CLIENTS_WMODE == *mode) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_mode(): RTW_PHL_TDMRA_2CLIENTS_WMODE\n");
} else if (RTW_PHL_TDMRA_AP_WMODE == *mode) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_mode(): RTW_PHL_TDMRA_AP_WMODE\n");
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_dump_mode(): Undefined mode(%d)\n",
*mode);
}
}
void _mcc_dump_sync_tsf_info(struct rtw_phl_mcc_sync_tsf_info *info)
{
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_sync_tsf_info(): sync_en(%d), source macid(%d), target macid(%d), offset(%d)\n",
info->sync_en, info->source, info->target, info->offset);
}
void _mcc_dump_dur_info(struct rtw_phl_mcc_dur_info *dur_i)
{
struct rtw_phl_mcc_dur_lim_info *dur_l = &dur_i->dur_limit;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_dump_dur_info(): dur(%d), dur lim info: enable(%d), tag(%d), max_dur(%d)\n",
dur_i->dur, dur_l->enable, dur_l->tag, dur_l->max_dur);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< _mcc_dump_dur_info(): max_toa(%d), max_tob(%d)\n",
dur_l->max_toa, dur_l->max_tob);
}
void _mcc_dump_role_info(struct rtw_phl_mcc_role *mrole)
{
struct rtw_phl_mcc_policy_info *policy = &mrole->policy;
u8 i = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_dump_role_info(): wrole id(%d), type(%d), macid(%d), bcn_intvl(%d)\n",
mrole->wrole->id, mrole->wrole->type, mrole->macid,
mrole->bcn_intvl);
for (i = 0; i < PHL_MACID_MAX_ARRAY_NUM; i++) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_role_info(): macid_map[%d]= 0x%08X\n",
i, mrole->used_macid.bitmap[i]);
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_role_info(): chan(%d), center_ch(%d), bw(%d), offset(%d)\n",
mrole->chandef->chan, mrole->chandef->center_ch,
mrole->chandef->bw, mrole->chandef->offset);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_role_info(): group(%d), c2h_rpt(%d), tx_null_early(%d)\n",
mrole->group, policy->c2h_rpt, policy->tx_null_early);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_role_info(): dis_tx_null(%d), in_curr_ch(%d), dis_sw_retry(%d)\n",
policy->dis_tx_null, policy->in_curr_ch, policy->dis_sw_retry);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_role_info(): sw_retry_count(%d), rfk_chk(%d)\n",
policy->sw_retry_count, policy->rfk_chk);
_mcc_dump_dur_info(&policy->dur_info);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< _mcc_dump_role_info(): courtesy_en(%d), courtesy_num(%d), courtesy_target(0x%x)\n",
policy->courtesy_en, policy->courtesy_num,
policy->courtesy_target);
}
void _mcc_dump_pattern(struct rtw_phl_mcc_pattern *m_pattern)
{
struct rtw_phl_mcc_courtesy *courtesy_i = &m_pattern->courtesy_i;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_dump_pattern(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d), bcns_offset(%d), calc_fail(%d), d_r_d_a_spacing_max(%d), c_en(%d)\n",
m_pattern->tob_r, m_pattern->toa_r, m_pattern->tob_a,
m_pattern->toa_a, m_pattern->bcns_offset, m_pattern->calc_fail,
m_pattern->d_r_d_a_spacing_max, courtesy_i->c_en);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_pattern(): slot_num(%d), bt_slot_num(%d)\n",
m_pattern->slot_num, m_pattern->bt_slot_num);
if (courtesy_i->c_en) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< _mcc_dump_pattern(): c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
courtesy_i->c_en, courtesy_i->c_num,
courtesy_i->src_role->macid,
courtesy_i->tgt_role->macid);
}
}
void _mcc_dump_ref_role_info(struct rtw_phl_mcc_en_info *info)
{
struct rtw_phl_mcc_role *ref_role = NULL;
ref_role = get_ref_role(info);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_ref_role_info(): mrole idx(%d), wrole id(%d), macid(%d) chan(%d), bw(%d), offset(%d)\n",
info->ref_role_idx, ref_role->wrole->id, ref_role->macid,
ref_role->chandef->chan, ref_role->chandef->bw,
ref_role->chandef->offset);
}
void _mcc_dump_en_info(struct rtw_phl_mcc_en_info *info)
{
struct rtw_phl_mcc_role *m_role = NULL;
u8 midx = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_en_info(): mrole_map(0x%x), role_num(%d), mcc_intvl(%d), Start tsf(0x%08X %08X)\n",
info->mrole_map, info->mrole_num, info->mcc_intvl,
info->tsf_high, info->tsf_low);
_mcc_dump_ref_role_info(info);
_mcc_dump_sync_tsf_info(&info->sync_tsf_info);
_mcc_dump_pattern(&info->m_pattern);
for (midx = 0; midx < MCC_ROLE_NUM; midx++) {
if (!(info->mrole_map & BIT(midx)))
continue;
m_role = &info->mcc_role[midx];
_mcc_dump_role_info(m_role);
}
}
void _mcc_dump_bt_ino(struct rtw_phl_mcc_bt_info *bt_info)
{
u8 seg_num = BT_SEG_NUM;
if (bt_info->bt_seg_num > seg_num)
return;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_dump_bt_ino(): bt_dur(%d), bt_seg_num(%d), bt_seg[0](%d), bt_seg[1](%d), add_bt_role(%d)\n",
bt_info->bt_dur, bt_info->bt_seg_num, bt_info->bt_seg[0],
bt_info->bt_seg[1], bt_info->add_bt_role);
}
void _mcc_dump_mcc_info(struct phl_mcc_info *minfo)
{
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_dump_mcc_info():\n");
_mcc_dump_mode(&minfo->mcc_mode);
_mcc_dump_state(&minfo->state);
_mcc_dump_bt_ino(&minfo->bt_info);
_mcc_dump_en_info(&minfo->en_info);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< _mcc_dump_mcc_info():\n");
}
void _mcc_set_state(struct phl_mcc_info *minfo, enum rtw_phl_mcc_state state)
{
PHL_TRACE(COMP_PHL_MCC, _PHL_ALWAYS_, "_mcc_set_state(): Set from (%d) to (%d)\n",
minfo->state, state);
minfo->state = state;
_mcc_dump_state(&minfo->state);
}
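/*
 * Role category helpers: the AP category covers PHL_RTYPE_AP/P2P_GO, the
 * client category covers PHL_RTYPE_STATION/P2P_GC/TDLS. The MCC defaults
 * below (duration, tx null, beacon interval source) are chosen per category.
 */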
bool _mcc_is_ap_category(struct rtw_wifi_role_t *wrole)
{
bool ret = false;
if (wrole->type == PHL_RTYPE_AP || wrole->type == PHL_RTYPE_P2P_GO)
ret = true;
return ret;
}
bool _mcc_is_client_category(struct rtw_wifi_role_t *wrole)
{
bool ret = false;
if (wrole->type == PHL_RTYPE_STATION || wrole->type == PHL_RTYPE_P2P_GC || wrole->type == PHL_RTYPE_TDLS)
ret = true;
return ret;
}
struct rtw_phl_mcc_role *
_mcc_get_mrole_by_wrole(struct phl_mcc_info *minfo,
struct rtw_wifi_role_t *wrole)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *m_role = NULL;
u8 midx = 0;
for (midx = 0; midx < MCC_ROLE_NUM; midx++) {
if (!(en_info->mrole_map & BIT(midx)))
continue;
m_role = &en_info->mcc_role[midx];
if (m_role->wrole == wrole) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_get_mrole_by_wrole(): Get mrole in mrole_idx(%d), wrole->type(%d), wrole->id(%d)\n",
midx, wrole->type, wrole->id);
return m_role;
}
}
return NULL;
}
u8
_mcc_get_mrole_idx_by_wrole(struct phl_mcc_info *minfo,
struct rtw_wifi_role_t *wrole, u8 *idx)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *m_role = NULL;
u8 midx = 0;
for (midx = 0; midx < MCC_ROLE_NUM; midx++) {
if (!(en_info->mrole_map & BIT(midx)))
continue;
m_role = &en_info->mcc_role[midx];
if (m_role->wrole == wrole) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_get_mrole_idx_by_wrole(): Get mrole in mrole_idx(%d)\n",
midx);
*idx = midx;
status = RTW_PHL_STATUS_SUCCESS;
break;
}
}
return status;
}
struct rtw_phl_mcc_role *
_mcc_get_mrole_by_category(struct rtw_phl_mcc_en_info *en_info,
enum _mcc_role_cat category)
{
struct rtw_phl_mcc_role *m_role = NULL;
u8 midx = 0;
for (midx = 0; midx < MCC_ROLE_NUM; midx++) {
if (!(en_info->mrole_map & BIT(midx)))
continue;
m_role = &en_info->mcc_role[midx];
if (MCC_ROLE_AP_CAT == category) {
if (_mcc_is_ap_category(m_role->wrole))
return m_role;
} else if (MCC_ROLE_CLIENT_CAT == category) {
if (_mcc_is_client_category(m_role->wrole))
return m_role;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_get_mrole_by_category(): Undefined category(%d)\n",
category);
break;
}
}
return NULL;
}
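/*
 * Derive the TDMRA working mode from the roles sharing this band:
 * 1 AP + 1 client -> AP_CLIENT, 2 clients -> 2CLIENTS, 1 AP only -> AP,
 * any other combination is treated as unknown and rejected.
 */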
enum rtw_phl_status _mcc_transfer_mode(struct phl_info_t *phl,
struct phl_mcc_info *minfo, u8 role_map)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_wifi_role_t *wrole = NULL;
u8 ridx = 0, ap_num = 0, client_num = 0;
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
if (!(role_map & BIT(ridx)))
continue;
wrole = phl_get_wrole_by_ridx(phl, ridx);
if (wrole == NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_transfer_mode(): get wrole fail, role_idx(%d)\n",
ridx);
goto exit;
}
if (_mcc_is_client_category(wrole)) {
client_num++;
} else if (_mcc_is_ap_category(wrole)) {
ap_num++;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_transfer_mode(): undefined category, role->type(%d), ridx(%d), shall check code flow\n",
wrole->type, ridx);
goto exit;
}
}
if ((client_num + ap_num) > MAX_MCC_GROUP_ROLE) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_transfer_mode(): client_num(%d) + ap_num(%d) exceeds MAX_MCC_GROUP_ROLE, please check code flow\n",
client_num, ap_num);
goto exit;
}
if (ap_num == 1 && client_num == 1) {
minfo->mcc_mode = RTW_PHL_TDMRA_AP_CLIENT_WMODE;
} else if (ap_num == 0 && client_num == 2) {
minfo->mcc_mode = RTW_PHL_TDMRA_2CLIENTS_WMODE;
} else if (ap_num == 1 && client_num == 0) {
minfo->mcc_mode = RTW_PHL_TDMRA_AP_WMODE;
} else {
minfo->mcc_mode = RTW_PHL_TDMRA_UNKNOWN_WMODE;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_transfer_mode(): Undefined mode, please check code flow\n");
goto exit;
}
_mcc_dump_mode(&minfo->mcc_mode);
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
enum rtw_phl_status _mcc_get_role_map(struct phl_info_t *phl,
struct hw_band_ctl_t *band_ctrl, u8 *role_map)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
void *drv = phl_to_drvpriv(phl);
struct rtw_chan_ctx *chanctx = NULL;
_os_list *chan_ctx_list = &band_ctrl->chan_ctx_queue.queue;
*role_map = 0;
_os_spinlock(drv, &band_ctrl->chan_ctx_queue.lock, _ps, NULL);
phl_list_for_loop(chanctx, struct rtw_chan_ctx, chan_ctx_list, list) {
*role_map |= chanctx->role_map;
}
_os_spinunlock(drv, &band_ctrl->chan_ctx_queue.lock, _ps, NULL);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_get_role_map(): role_map(%d)\n",
*role_map);
status = RTW_PHL_STATUS_SUCCESS;
return status;
}
void _mcc_set_fw_log_info(struct phl_info_t *phl, u8 hw_band,
bool en_fw_mcc_log, u8 fw_mcc_log_lv)
{
struct phl_mcc_info *minfo = get_mcc_info(phl, hw_band);
if (minfo->fw_log_i.en_fw_mcc_log != en_fw_mcc_log) {
minfo->fw_log_i.en_fw_mcc_log = en_fw_mcc_log;
minfo->fw_log_i.update = true;
}
if (minfo->fw_log_i.fw_mcc_log_lv != fw_mcc_log_lv) {
minfo->fw_log_i.fw_mcc_log_lv = fw_mcc_log_lv;
minfo->fw_log_i.update = true;
}
}
void _mcc_up_fw_log_setting(struct phl_info_t *phl, struct phl_mcc_info *minfo)
{
struct phl_mcc_fw_log_info *fw_log_i = &minfo->fw_log_i;
if (fw_log_i->update) {
rtw_hal_cfg_fw_mcc_log(phl->hal, fw_log_i->en_fw_mcc_log);
fw_log_i->update = false;
}
}
void _mcc_set_unspecific_dur(struct phl_mcc_info *minfo)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *m_role = NULL;
u8 midx = 0;
for (midx = 0; midx < MCC_ROLE_NUM; midx++) {
if (!(en_info->mrole_map & BIT(midx)))
continue;
m_role = &en_info->mcc_role[midx];
m_role->policy.dur_info.dur = MCC_DUR_NONSPECIFIC;
}
}
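/*
 * Translate a core-layer time-slot limit request into per-role duration
 * limits. Assuming the beacon sits at TSF % bcn_intvl == 0:
 *	max_toa = (start_t % (bcn_intvl * TU)) / TU
 *	max_dur = (intvl - dur) / TU
 *	max_tob = max_dur - max_toa
 * e.g. (illustrative numbers) with bcn_intvl = 100 TU and a 20 TU limited
 * slot starting 70 TU after TBTT: max_toa = 70, max_dur = 80, max_tob = 10.
 */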
void _mcc_fill_dur_lim_info(struct phl_info_t *phl,
struct rtw_phl_mcc_role *mrole,
struct phl_mcc_dur_lim_req_info *dur_req)
{
struct rtw_phl_mcc_dur_info *dur_i = &mrole->policy.dur_info;
struct rtw_phl_mcc_dur_lim_info *dur_lim = &dur_i->dur_limit;
u64 tsf_lim = 0;
u32 max_toa = 0, max_tob = 0, max_dur = 0;
u32 bcn_intvl = mrole->bcn_intvl * TU;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_dur_lim_info(): dur_req: tag(%d), enable(%d), start_t_h(0x%08x), start_t_l(0x%08x), dur(%d), intvl(%d)\n",
dur_req->tag, dur_req->enable, dur_req->start_t_h,
dur_req->start_t_l, dur_req->dur, dur_req->intvl);
dur_lim->enable = false;
if (!dur_req->enable) {
goto exit;
}
if (bcn_intvl != dur_req->intvl) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_dur_lim_info(): not support bcn_intvl(%d) != dur_req.intvl(%d), please check code\n",
bcn_intvl, dur_req->intvl);
goto exit;
}
/* Assume the beacon is allocated at TSF % bcn_intvl == 0 */
tsf_lim = dur_req->start_t_h;
tsf_lim = tsf_lim << 32;
tsf_lim |= dur_req->start_t_l;
max_toa = (u16)_os_modular64(tsf_lim, mrole->bcn_intvl * TU);
if (max_toa >= (mrole->bcn_intvl * TU - dur_req->dur) ||
max_toa == 0) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_dur_lim_info(): not support bcn allocate in limited slot, please check code\n");
goto exit;
}
max_dur = dur_req->intvl - dur_req->dur;
max_toa = max_toa / TU;
max_dur = max_dur / TU;
max_tob = max_dur - max_toa;
dur_lim->max_toa = (u16)max_toa;
dur_lim->max_tob = (u16)max_tob;
dur_lim->max_dur = (u16)max_dur;
dur_lim->tag = dur_req->tag;
dur_lim->enable = true;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_dur_lim_info(): dur_lim_info: enable(%d), tag(%d), max_toa(%d), max_tob(%d), max_dur(%d)\n",
dur_lim->enable, dur_lim->tag, dur_lim->max_toa,
dur_lim->max_tob, dur_lim->max_dur);
return;
}
void _mcc_fill_default_policy(struct phl_info_t *phl,
struct rtw_phl_mcc_role *mrole)
{
struct rtw_phl_mcc_policy_info *policy = &mrole->policy;
struct phl_mcc_dur_lim_req_info dur_req = {0};
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_default_policy(): set mcc policy by default setting\n");
policy->c2h_rpt = RTW_MCC_RPT_ALL;
policy->tx_null_early = 3;
policy->dis_tx_null = _mcc_is_client_category(mrole->wrole) ? 0 : 1;
policy->in_curr_ch = 0;
policy->dis_sw_retry = 1;
policy->sw_retry_count = 0;
policy->dur_info.dur = _mcc_is_client_category(mrole->wrole) ?
DEFAULT_CLIENT_DUR : DEFAULT_AP_DUR;
phl_mr_mcc_query_role_time_slot_lim(phl, mrole->wrole, &dur_req);
_mcc_fill_dur_lim_info(phl, mrole, &dur_req);
policy->rfk_chk = rtw_hal_check_ch_rfk(phl->hal, &mrole->wrole->chandef);
if (false == policy->rfk_chk) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_default_policy(): No rfk, it will degrade perormance, please check code\n");
}
}
void _mcc_fill_mcc_role_policy_info(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, struct rtw_phl_mcc_role *mrole)
{
struct phl_com_mcc_info *com_info = phl_to_com_mcc_info(phl);
struct rtw_phl_mcc_policy_info *policy = &mrole->policy;
struct rtw_phl_mcc_setting_info param = {0};
struct phl_mcc_info *minfo = get_mcc_info(phl, wrole->hw_band);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, ">>> _mcc_fill_mcc_role_policy_info()\n");
_mcc_fill_default_policy(phl, mrole);
if (NULL == com_info->ops.mcc_get_setting)
goto exit;
param.wrole = wrole;
param.role_map = minfo->role_map;
param.tx_null_early = NONSPECIFIC_SETTING;
param.dur = NONSPECIFIC_SETTING;
if (!com_info->ops.mcc_get_setting(com_info->ops.priv, ¶m)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_mcc_role_policy_info(): mcc_get_setting from core layer fail\n");
goto exit;
}
if (NONSPECIFIC_SETTING != param.tx_null_early) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_mcc_role_policy_info(): Core layer change tx_null_early from %d to %d\n",
policy->tx_null_early, param.tx_null_early);
policy->tx_null_early = param.tx_null_early;
}
if (NONSPECIFIC_SETTING != param.dur) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_mcc_role_policy_info(): Core layer change dur from %d to %d\n",
policy->dur_info.dur, param.dur);
policy->dur_info.dur = param.dur;
}
_mcc_set_fw_log_info(phl, wrole->hw_band, param.en_fw_mcc_log,
param.fw_mcc_log_lv);
exit:
return;
}
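/*
 * Point the mcc role at the macid bitmap owned by its wifi role and record
 * how many u32 words of that bitmap are actually used (up to the highest
 * non-zero word), presumably so only the meaningful part is passed on.
 */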
void _mcc_fill_macid_bitmap_by_role(struct phl_info_t *phl,
struct rtw_phl_mcc_role *mrole)
{
struct macid_ctl_t *mc = phl_to_mac_ctrl(phl);
struct rtw_phl_mcc_macid_bitmap *used_macid = &mrole->used_macid;
u8 i = 0, max_map_idx = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_fill_macid_bitmap_by_role()\n");
for (i = 0; i < PHL_MACID_MAX_ARRAY_NUM; i++) {
if ((mc->wifi_role_usedmap[mrole->wrole->id][i] != 0) &&
(max_map_idx <= i)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_macid_bitmap_by_role(): macid_map[%d]=0x%08x\n",
i, mc->wifi_role_usedmap[mrole->wrole->id][i]);
max_map_idx = i;
}
}
used_macid->bitmap = &mc->wifi_role_usedmap[mrole->wrole->id][0];
used_macid->len = (max_map_idx + 1) * sizeof(u32);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_macid_bitmap_by_role(): bitmap->len(%d), max_map_idx(%d)\n",
used_macid->len, max_map_idx);
}
enum rtw_phl_status _mcc_fill_mcc_role_basic_info(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, struct rtw_phl_mcc_role *mrole)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_stainfo_t *sta = rtw_phl_get_stainfo_self(phl, wrole);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_fill_mcc_role_basic_info()\n");
if (sta == NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_mcc_role_basic_info(): transfer mcc mode failed\n");
goto exit;
}
mrole->wrole = wrole;
mrole->macid = sta->macid;
#ifdef RTW_PHL_BCN
if (_mcc_is_ap_category(wrole))
mrole->bcn_intvl = (u16)wrole->bcn_cmn.bcn_interval;
else
#endif
mrole->bcn_intvl = sta->asoc_cap.bcn_interval;
if (mrole->bcn_intvl == 0) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_mcc_role_basic_info(): mrole->bcn_intvl ==0, please check code of core layer.\n");
goto exit;
}
mrole->chandef = &wrole->chandef;
_mcc_fill_macid_bitmap_by_role(phl, mrole);
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
enum rtw_phl_status _mcc_fill_ref_role_info(struct phl_info_t *phl,
struct rtw_phl_mcc_en_info *en_info,
struct rtw_wifi_role_t *wrole)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_mcc_role *mrole = NULL;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_fill_ref_role_info()\n");
mrole = &en_info->mcc_role[REF_ROLE_IDX];
status = _mcc_fill_mcc_role_basic_info(phl, wrole, mrole);
if (RTW_PHL_STATUS_SUCCESS != status) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_ref_role_info(): set basic info failed\n");
goto exit;
}
_mcc_fill_mcc_role_policy_info(phl, wrole, mrole);
en_info->mrole_map |= BIT(REF_ROLE_IDX);
en_info->mrole_num++;
status = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_ref_role_info(): status(%d), wrole id(%d), wrole->type(%d), Fill mrole(%d) Info\n",
status, wrole->id, wrole->type, REF_ROLE_IDX);
return status;
}
enum rtw_phl_status _mcc_fill_role_info(struct phl_info_t *phl,
struct rtw_phl_mcc_en_info *en_info, u8 role_map,
struct rtw_wifi_role_t *cur_role)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_wifi_role_t *wrole = NULL;
struct rtw_phl_mcc_role *mrole = NULL;
u8 ridx = 0, mcc_idx = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_fill_role_info()\n");
if (RTW_PHL_STATUS_SUCCESS != _mcc_fill_ref_role_info(phl, en_info,
cur_role)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_role_info(): set ref role info failed\n");
goto exit;
}
mcc_idx = en_info->mrole_num;
role_map &= ~(BIT(cur_role->id));
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
if (!(role_map & BIT(ridx)))
continue;
wrole = phl_get_wrole_by_ridx(phl, ridx);
if (wrole == NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_role_info(): get wrole fail, role_idx(%d)\n",
ridx);
goto exit;
}
if (mcc_idx >= MCC_ROLE_NUM) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_role_info(): mcc_idx(%d) >= MCC_ROLE_NUM(%d)\n",
mcc_idx, MCC_ROLE_NUM);
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_role_info(): wrole(%d), wrole->type(%d), Fill mrole(%d) Info\n",
ridx, wrole->type, mcc_idx);
mrole = &en_info->mcc_role[mcc_idx];
status = _mcc_fill_mcc_role_basic_info(phl, wrole, mrole);
if (RTW_PHL_STATUS_SUCCESS != status) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_role_info(): set basic info failed\n");
goto exit;
}
_mcc_fill_mcc_role_policy_info(phl, wrole, mrole);
en_info->mrole_map |= BIT(mcc_idx);
en_info->mrole_num++;
mcc_idx ++;
}
status = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< _mcc_fill_role_info(): status(%d), role_map(0x%x), mcc_role_map(0x%x)\n",
status, role_map, en_info->mrole_map);
return status;
}
void _mcc_fill_coex_mode(struct phl_info_t *phl, struct phl_mcc_info *minfo)
{
/* if get from core or ....
else*/
minfo->coex_mode = RTW_PHL_MCC_COEX_MODE_BT_MASTER;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_coex_mode(): Set default mode(%d)\n",
minfo->coex_mode);
}
void _mcc_fill_bt_dur(struct phl_info_t *phl, struct phl_mcc_info *minfo)
{
minfo->bt_info.bt_dur = (u16)rtw_hal_get_btc_req_slot(phl->hal);
minfo->bt_info.bt_seg_num = 1;
minfo->bt_info.bt_seg[0] = minfo->bt_info.bt_dur;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_bt_dur(): minfo->bt_info.bt_dur(%d)\n",
minfo->bt_info.bt_dur);
}
/*
* fill slot info
* @m_pattern: pattern info
* @bt_role: True: BT Role; False: Wifi Role
* @dur: time slot
* @mrole: mcc role info
*/
void _mcc_fill_slot_info(struct rtw_phl_mcc_pattern *m_pattern, bool bt_role,
u16 dur, struct rtw_phl_mcc_role *mrole)
{
if (m_pattern->slot_num >= SLOT_NUM) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_slot_info(): Fail, m_pattern->slot_num(%d) >= SLOT_NUM(%d)\n",
m_pattern->slot_num, SLOT_NUM);
return;
}
if (bt_role) {
m_pattern->slot_order[m_pattern->slot_num].bt_role = true;
m_pattern->slot_order[m_pattern->slot_num].slot = dur;
m_pattern->slot_order[m_pattern->slot_num].mrole = NULL;
m_pattern->bt_slot_num++;
} else {
m_pattern->slot_order[m_pattern->slot_num].bt_role = false;
m_pattern->slot_order[m_pattern->slot_num].slot = dur;
m_pattern->slot_order[m_pattern->slot_num].mrole = mrole;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "m_pattern->slot_num(): Fill slot, Idx(%d), dur(%d), is BT slot(%d)\n",
m_pattern->slot_num, dur, bt_role);
m_pattern->slot_num++;
}
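/*
 * Reset selected parts of the per-band MCC info: MINFO_RESET_ALL clears the
 * whole structure, otherwise each bit set in reset_type clears one member.
 */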
void _mcc_reset_minfo(struct phl_info_t *phl, struct phl_mcc_info *minfo,
enum _mcc_minfo_reset_type reset_type)
{
void *priv = phl_to_drvpriv(phl);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_reset_minfo\n");
if (reset_type == MINFO_RESET_ALL) {
_os_mem_set(priv, minfo, 0, sizeof(struct phl_mcc_info));
return;
}
if (reset_type & MINFO_RESET_EN_INFO)
_os_mem_set(priv, &minfo->en_info, 0,
sizeof(struct rtw_phl_mcc_en_info));
if (reset_type & MINFO_RESET_MODE)
_os_mem_set(priv, &minfo->mcc_mode, 0,
sizeof(enum rtw_phl_tdmra_wmode));
if (reset_type & MINFO_RESET_ROLE_MAP)
_os_mem_set(priv, &minfo->role_map, 0,
sizeof(minfo->role_map));
if (reset_type & MINFO_RESET_STATE)
_os_mem_set(priv, &minfo->state, 0,
sizeof(enum rtw_phl_mcc_state));
if (reset_type & MINFO_RESET_COEX_MODE)
_os_mem_set(priv, &minfo->coex_mode, 0,
sizeof(enum rtw_phl_mcc_coex_mode));
if (reset_type & MINFO_RESET_BT_INFO)
_os_mem_set(priv, &minfo->bt_info, 0,
sizeof(struct rtw_phl_mcc_bt_info));
if (reset_type & MINFO_RESET_PATTERN_INFO)
_os_mem_set(priv, &minfo->en_info.m_pattern, 0,
sizeof(struct rtw_phl_mcc_pattern));
}
void _mcc_clean_noa(struct phl_info_t *phl, struct rtw_phl_mcc_en_info *en_info)
{
struct phl_com_mcc_info *com_info = phl_to_com_mcc_info(phl);
struct rtw_phl_mcc_noa param = {0};
if (com_info == NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_clean_noa(): Get mcc common info failed\n");
} else if (com_info->ops.mcc_update_noa) {
struct rtw_phl_mcc_role *ap_role = NULL;
ap_role = _mcc_get_mrole_by_category(en_info, MCC_ROLE_AP_CAT);
if (NULL == ap_role) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_clean_noa(): Get AP role fail\n");
goto exit;
}
param.wrole = ap_role->wrole;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_clean_noa()\n");
com_info->ops.mcc_update_noa(com_info->ops.priv, ¶m);
}
exit:
return;
}
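/*
 * Build the P2P NoA update for the AP(GO) + client case. If the GO is the
 * reference role, its absence starts dur_r after the MCC start TSF; otherwise
 * the GO runs on the other port, so the start time is translated via the
 * two-port TSF readback (minus any trailing BT segment). The NoA duration is
 * the MCC interval minus the GO's own slot.
 */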
bool _tdmra_calc_noa_2wrole(struct phl_info_t *phl, struct phl_mcc_info *minfo,
struct rtw_phl_mcc_noa *param)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_bt_info *bt = &minfo->bt_info;
struct rtw_phl_mcc_role *role_ref = get_ref_role(en_info);
struct rtw_phl_mcc_role *role_ano = (role_ref == &en_info->mcc_role[0])
? &en_info->mcc_role[1] : &en_info->mcc_role[0];
u16 d_r = role_ref->policy.dur_info.dur;
u16 d_a = role_ano->policy.dur_info.dur;
u64 mcc_start = 0, noa_start = 0;
bool ret = false;
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, ">>> _tdmra_calc_noa_2wrole()\n");
mcc_start = en_info->tsf_high;
mcc_start = mcc_start << 32;
mcc_start |= en_info->tsf_low;
if (_mcc_is_ap_category(role_ref->wrole)){
/*calculate end time of GO*/
noa_start = mcc_start + (d_r * TU);
param->dur = en_info->mcc_intvl - d_r;
param->wrole = role_ref->wrole;
} else {
u32 tsf_ref_h = 0, tsf_ref_l = 0, tsf_ano_h = 0, tsf_ano_l = 0;
u64 tsf_ref = 0, tsf_ano = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_tdmra_calc_noa_2wrole(): AP Role isn't ref role, we need to get 2 port tsf\n");
_mcc_dump_bt_ino(bt);
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mcc_get_2ports_tsf(
phl->hal, role_ref->group, role_ref->macid,
role_ano->macid, &tsf_ref_h, &tsf_ref_l,
&tsf_ano_h, &tsf_ano_l)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_tdmra_calc_noa_2wrole(): Get 2 port tsf failed\n");
goto exit;
}
tsf_ref = tsf_ref_h;
tsf_ref = tsf_ref << 32;
tsf_ref |= tsf_ref_l;
tsf_ano = tsf_ano_h;
tsf_ano = tsf_ano << 32;
tsf_ano |= tsf_ano_l;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_tdmra_calc_noa_2wrole(): tsf_ref: 0x%08X %08x, tsf_ano: 0x%08x %08x\n",
(u32)(tsf_ref >> 32), (u32)tsf_ref,
(u32)(tsf_ano >> 32), (u32)tsf_ano);
/*calculate end time of GO*/
noa_start = mcc_start + (en_info->mcc_intvl * TU);
if (bt->add_bt_role) {
if(bt->bt_seg_num == 1) {
noa_start -= (bt->bt_seg[0] * TU);
} else if (bt->bt_seg_num == 2) {
noa_start -= (bt->bt_seg[1] * TU);
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_tdmra_calc_noa_2wrole(): error bt_seg_num(%d), please check code flow\n",
bt->bt_seg_num);
goto exit;
}
}
noa_start = noa_start - tsf_ref + tsf_ano;
param->dur = en_info->mcc_intvl - d_a;
param->wrole = role_ano->wrole;
}
param->start_t_h = noa_start >> 32;
param->start_t_l = (u32)noa_start;
param->cnt = 255;
param->interval = en_info->mcc_intvl;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_tdmra_calc_noa_2wrole(): IsGORef(%d), mcc_start(0x%08x %08x)\n",
_mcc_is_ap_category(role_ref->wrole),
(u32)(mcc_start >> 32), (u32)mcc_start);
ret = true;
exit:
return ret;
}
bool _tdmra_calc_noa_1wrole(struct phl_info_t *phl, struct phl_mcc_info *minfo,
struct rtw_phl_mcc_noa *param)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *role_ref = get_ref_role(en_info);
u16 d_r = role_ref->policy.dur_info.dur;
u64 mcc_start = 0, noa_start = 0;
bool ret = false;
mcc_start = en_info->tsf_high;
mcc_start = mcc_start << 32;
mcc_start |= en_info->tsf_low;
/*calculate end time of GO*/
noa_start = mcc_start + (d_r * TU);
param->dur = en_info->mcc_intvl - d_r;
param->wrole = role_ref->wrole;
param->start_t_h = noa_start >> 32;
param->start_t_l = (u32)noa_start;
param->cnt = 255;
param->interval = en_info->mcc_intvl;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_tdmra_calc_noa_1wrole(): IsGORef(%d), mcc_start(0x%08x %08x)\n",
_mcc_is_ap_category(role_ref->wrole),
(u32)(mcc_start >> 32), (u32)mcc_start);
ret = true;
return ret;
}
void _mcc_up_noa(struct phl_info_t *phl, struct phl_mcc_info *minfo)
{
struct phl_com_mcc_info *com_info = phl_to_com_mcc_info(phl);
struct rtw_phl_mcc_noa param = {0};
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, ">>> _mcc_up_noa()\n");
if (com_info == NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_up_noa(): Get mcc common info failed\n");
goto exit;
}
if (!com_info->ops.mcc_update_noa)
goto exit;
if (RTW_PHL_TDMRA_AP_CLIENT_WMODE == minfo->mcc_mode) {
if (false == _tdmra_calc_noa_2wrole(phl, minfo, ¶m))
goto exit;
} else if (RTW_PHL_TDMRA_AP_WMODE == minfo->mcc_mode) {
if (false == _tdmra_calc_noa_1wrole(phl, minfo, ¶m))
goto exit;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_up_noa(): error wmode\n");
_mcc_dump_mode(&minfo->mcc_mode);
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_up_noa(): NOA_start(0x%08x %08x), dur(%d), cnt(%d), interval(%d)\n",
param.start_t_h, param.start_t_l, param.dur, param.cnt,
param.interval);
com_info->ops.mcc_update_noa(com_info->ops.priv, ¶m);
exit:
return;
}
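/*
 * Both wifi roles on 2.4G plus a BT slot: split the remaining wifi time
 * between the two roles in proportion to their requested durations, clamp
 * each to its min/max, and if no valid split exists shrink the BT slot by one
 * TU and retry until a combination fits or the interval is exhausted.
 */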
bool _mcc_adjust_dur_for_2g_mcc_2role_bt(struct phl_mcc_info *minfo)
{
struct rtw_phl_mcc_bt_info *bt_info = &minfo->bt_info;
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *m_role1 = &en_info->mcc_role[0];
struct rtw_phl_mcc_role *m_role2 = &en_info->mcc_role[1];
u16 d1 = m_role1->policy.dur_info.dur;
u16 d2 = m_role2->policy.dur_info.dur;
u16 d1_max = (m_role1->policy.dur_info.dur_limit.enable) ?
m_role1->policy.dur_info.dur_limit.max_dur : en_info->mcc_intvl;
u16 d2_max = (m_role2->policy.dur_info.dur_limit.enable) ?
m_role2->policy.dur_info.dur_limit.max_dur : en_info->mcc_intvl;
u16 d1_min = _mcc_is_ap_category(m_role1->wrole) ?
MIN_AP_DUR : MIN_CLIENT_DUR;
u16 d2_min = _mcc_is_ap_category(m_role2->wrole) ?
MIN_AP_DUR : MIN_CLIENT_DUR;
u16 wifi_dur = 0, bt_dur = bt_info->bt_dur;
u16 i = 0;
bool adjust_ok = false;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_adjust_dur_for_2g_mcc_2role_bt(): mcc_intvl(%d), bt_dur(%d), d1(%d), d2(%d), d1_min(%d), d1_max(%d), d2_min(%d), d2_max(%d)\n",
en_info->mcc_intvl, bt_dur, d1, d2, d1_min, d1_max, d2_min, d2_max);
for (i = 0; i < en_info->mcc_intvl; i++) {
wifi_dur = en_info->mcc_intvl - bt_dur;
d1 = ((d1 * 100 / (d1 + d2)) * wifi_dur) / 100;
if (d1 < d1_min) {
d1 = d1_min;
} else if (d1 > d1_max) {
d1 = d1_max;
}
d2 = wifi_dur - d1;
if (d2 < d2_min) {
d2 = d2_min;
d1 = wifi_dur - d2;
} else if (d2 > d2_max) {
d2 = d2_max;
d1 = wifi_dur - d2;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_adjust_dur_for_2g_mcc_2role_bt(): Loop bt_dur(%d), d1(%d), d2(%d)\n",
bt_dur, d1, d2);
if ((d1 >= d1_min) && (d1 <= d1_max) &&
(d2 >= d2_min) && (d2 <= d2_max)) {
m_role1->policy.dur_info.dur = d1;
m_role2->policy.dur_info.dur = d2;
bt_info->bt_dur = bt_dur;
en_info->m_pattern.d_r_d_a_spacing_max = bt_info->bt_dur;
adjust_ok = true;
break;
}
bt_dur--;
}
if (adjust_ok == false){
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_adjust_dur_for_2g_mcc_2role_bt(): Adjust fail\n");
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_adjust_dur_for_2g_mcc_2role_bt(): Adjust ok, d1(%d), d2(%d), bt dur(%d), d_r_d_a_spacing_max(%d)\n",
m_role1->policy.dur_info.dur,
m_role2->policy.dur_info.dur,
bt_info->bt_dur, en_info->m_pattern.d_r_d_a_spacing_max);
}
return adjust_ok;
}
void _mcc_adjust_dur_for_2_band_mcc_2role_bt(struct phl_mcc_info *minfo,
struct rtw_phl_mcc_role *role_2g, struct rtw_phl_mcc_role *role_non_2g)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
enum rtw_phl_mcc_coex_mode *coex_mode = &minfo->coex_mode;
struct rtw_phl_mcc_policy_info *plcy_2g = &role_2g->policy;
struct rtw_phl_mcc_policy_info *plcy_non2g = &role_non_2g->policy;
u16 *bt_dur = &minfo->bt_info.bt_dur;
u16 dur_2g = 0, dur_non2g = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_adjust_dur_for_2_band_mcc_2role_bt(): coex_mode(%d) 2G_Dur(%d), 5G_Dur(%d), BT_Dur(%d)\n",
*coex_mode, plcy_2g->dur_info.dur, plcy_non2g->dur_info.dur,
*bt_dur);
if (plcy_non2g->dur_info.dur >= *bt_dur) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_adjust_dur_for_2_band_mcc_2role_bt(): 5G_Dur(%d) >= BT_Dur(%d), no need to adjust 5G duration for BT\n",
plcy_non2g->dur_info.dur, *bt_dur);
goto exit;
}
if (plcy_non2g->dur_info.dur_limit.enable &&
plcy_non2g->dur_info.dur_limit.max_dur < *bt_dur) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_adjust_dur_for_2_band_mcc_2role_bt(): dur_limit.max_dur(%d) < bt_dur(%d), We can't adjust 5G duration(%d) for BT\n",
plcy_non2g->dur_info.dur_limit.max_dur,
*bt_dur, plcy_non2g->dur_info.dur);
goto exit;
}
if (*coex_mode == RTW_PHL_MCC_COEX_MODE_BT_MASTER) {
dur_non2g = *bt_dur;
dur_2g = en_info->mcc_intvl - dur_non2g;
if (plcy_2g->dur_info.dur_limit.enable &&
plcy_2g->dur_info.dur_limit.max_dur < dur_2g) {
dur_2g = plcy_2g->dur_info.dur_limit.max_dur;
dur_non2g = en_info->mcc_intvl - dur_2g;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_adjust_dur_for_2_band_mcc_2role_bt(): plcy_2g->max_dur(%d) < dur_2g(%d), We can adjust some 5G duration for BT\n",
plcy_2g->dur_info.dur_limit.max_dur, dur_2g);
}
plcy_non2g->dur_info.dur = dur_non2g;
plcy_2g->dur_info.dur = dur_2g;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_adjust_dur_for_2_band_mcc_2role_bt(): coex_mode == RTW_PHL_MCC_COEX_MODE_BT_MASTER, we adjust 5G duration for BT\n");
} else if (*coex_mode == RTW_PHL_MCC_COEX_MODE_WIFI_MASTER) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_adjust_dur_for_2_band_mcc_2role_bt(): coex_mode == RTW_PHL_MCC_COEX_MODE_WIFI_MASTER, we don't adjust 5G duration for BT\n");
goto exit;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_adjust_dur_for_2_band_mcc_2role_bt(): coex_mode(%d), Undefined mode, ignore bt duration\n",
*coex_mode);
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_adjust_dur_for_2_band_mcc_2role_bt(): After adjust, 2G_Dur(%d), 5G_Dur(%d), BT_Dur(%d)\n",
plcy_2g->dur_info.dur, plcy_non2g->dur_info.dur,
*bt_dur);
exit:
return;
}
bool _mcc_need_to_seg_bt_dur(struct phl_mcc_info *minfo)
{
/* Not ready for implementation*/
return false;
#if 0
bool seg = false;
struct rtw_phl_mcc_en_info *info = &minfo->en_info;
struct rtw_phl_mcc_dur_lim_info *limit_i = NULL;
u8 i = 0;
if (minfo->mcc_mode != RTW_PHL_TDMRA_2CLIENTS_WMODE)
goto exit;
if (minfo->bt_info.bt_dur < BT_DUR_SEG_TH || BT_SEG_NUM < 2)
goto exit;
for (i = 0; i < MCC_ROLE_NUM; i++) {
limit_i = &minfo->en_info.mcc_role[i].policy.dur_info.dur_limit;
if (limit_i->enable) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_need_to_seg_bt_dur(): Can't seg bt slot when wifi slot with limitation\n");
goto exit;
}
}
seg = true;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_need_to_seg_bt_dur(): seg(%d)\n",
seg);
return seg;
#endif
}
/*
* 2 wifi slot req + bt slot req
*/
void _mcc_discision_dur_for_2g_mcc_2role_bt(struct phl_mcc_info *minfo)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *m_role1 = &en_info->mcc_role[0];
struct rtw_phl_mcc_role *m_role2 = &en_info->mcc_role[1];
u16 d1 = 0, d2 = 0;
/* Segmented bt slot is not supported in current code */
if (_mcc_need_to_seg_bt_dur(minfo)) {
/*2 wifi slot + 2bt slot*/
d1 = m_role1->policy.dur_info.dur;
d2 = m_role2->policy.dur_info.dur;
en_info->mcc_intvl = WORSECASE_INTVL;
_mcc_adjust_dur_for_2g_mcc_2role_bt(minfo);
minfo->bt_info.bt_seg_num = 2;
minfo->bt_info.bt_seg[0] = minfo->bt_info.bt_dur / 2;
minfo->bt_info.bt_seg[1] = minfo->bt_info.bt_dur -
minfo->bt_info.bt_seg[0];
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_discision_dur_for_2g_mcc_2role_bt(): Change D1(%d), D2(%d) to D1(%d), D2(%d), bt_seg[0](%d), bt_seg[1](%d)\n",
d1, d2, m_role1->policy.dur_info.dur,
m_role2->policy.dur_info.dur,
minfo->bt_info.bt_seg[0], minfo->bt_info.bt_seg[1]);
} else {
/*2 wifi slot + 1bt slot*/
if (minfo->bt_info.bt_dur > BT_DUR_MAX_2WS) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_discision_dur_for_2g_mcc_2role_bt(): bt req slot(%d) > BT_DUR_MAX_2WS(%d)\n",
minfo->bt_info.bt_dur, BT_DUR_MAX_2WS);
minfo->bt_info.bt_dur = BT_DUR_MAX_2WS;
minfo->bt_info.bt_seg[0] = BT_DUR_MAX_2WS;
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_discision_dur_for_2g_mcc_2role_bt(): set bt rq slot to (%d)\n",
minfo->bt_info.bt_dur);
}
_mcc_adjust_dur_for_2g_mcc_2role_bt(minfo);
}
}
bool _mcc_discision_duration_for_2role_bt_v2(struct phl_mcc_info *minfo)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_bt_info *bt_info = &minfo->bt_info;
struct rtw_phl_mcc_role *m_role1 = &en_info->mcc_role[0];
struct rtw_phl_mcc_role *m_role2 = &en_info->mcc_role[1];
bool add_extra_bt_role = false;
if (bt_info->bt_dur == 0)
goto exit;
if (m_role1->chandef->band == BAND_ON_24G &&
m_role2->chandef->band == BAND_ON_24G) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_discision_duration_for_2role_bt_v2(): Not support , We will ignore Bt slot\n");
#if 0
if (_mcc_adjust_dur_for_2g_mcc_2role_bt(minfo)) {
add_extra_bt_role = true;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_discision_duration_for_2role_bt(): Adjust dur fail, We will ignore Bt slot\n");
}
#endif
goto exit;
}
if (m_role1->chandef->band != BAND_ON_24G &&
m_role2->chandef->band != BAND_ON_24G) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_discision_duration_for_2role_bt_v2(): All 5G, Don't care BT duration\n");
goto exit;
}
if (m_role1->chandef->band == BAND_ON_24G)
_mcc_adjust_dur_for_2_band_mcc_2role_bt(minfo, m_role1, m_role2);
else
_mcc_adjust_dur_for_2_band_mcc_2role_bt(minfo, m_role2, m_role1);
exit:
return add_extra_bt_role;
}
bool _mcc_discision_duration_for_2role_bt(struct phl_mcc_info *minfo)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *m_role1 = &en_info->mcc_role[0];
struct rtw_phl_mcc_role *m_role2 = &en_info->mcc_role[1];
bool add_extra_bt_role = false;
if (minfo->bt_info.bt_dur == 0)
goto exit;
if (m_role1->chandef->band == BAND_ON_24G &&
m_role2->chandef->band == BAND_ON_24G) {
_mcc_discision_dur_for_2g_mcc_2role_bt(minfo);
add_extra_bt_role = true;
goto exit;
}
if (m_role1->chandef->band != BAND_ON_24G &&
m_role2->chandef->band != BAND_ON_24G) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_discision_duration_for_2role_bt(): All 5G, Don't care BT duration\n");
goto exit;
}
if (m_role1->chandef->band == BAND_ON_24G)
_mcc_adjust_dur_for_2_band_mcc_2role_bt(minfo, m_role1, m_role2);
else
_mcc_adjust_dur_for_2_band_mcc_2role_bt(minfo, m_role2, m_role1);
exit:
return add_extra_bt_role;
}
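/*
 * Choose the MCC start TSF so the reference role's TBTT lands tob_r into its
 * slot: take the current TSF, step back by (offset-from-TBTT + tob_r), then
 * advance in whole beacon intervals until the start is at least
 * MIN_TRIGGER_MCC_TIME ahead of now.
 */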
enum rtw_phl_status _mcc_calculate_start_tsf(struct phl_info_t *phl,
struct rtw_phl_mcc_en_info *en_info
)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_mcc_role *ref_role = get_ref_role(en_info);
u32 tsf_h = 0, tsf_l = 0;
u64 tsf = 0, start_tsf = 0;
u8 i = 0, max_loop = 10, calc_done = 0;
u16 offset = 0;
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_get_tsf(phl->hal,
ref_role->wrole->hw_port, &tsf_h, &tsf_l)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_start_tsf(): Get tsf fail\n");
goto exit;
}
tsf = tsf_h;
tsf = tsf << 32;
tsf |= tsf_l;
/*calculate the value between current TSF and TBTT*/
phl_calc_offset_from_tbtt(phl, ref_role->wrole, tsf, &offset);
start_tsf = tsf - ((offset + en_info->m_pattern.tob_r) * TU);
for (i = 0; i < max_loop; i++) {
if (start_tsf < (tsf + (MIN_TRIGGER_MCC_TIME * TU))) {
start_tsf += (ref_role->bcn_intvl * TU);
} else {
calc_done = 1;
break;
}
}
if (!calc_done) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calculate_start_tsf(): Calcculate start tsf fail, please check code flow\n");
goto exit;
}
en_info->tsf_high = start_tsf >> 32;
en_info->tsf_low = (u32)start_tsf;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_start_tsf(): start_tsf(0x%08x %08x), cur_tsf(0x%08x %08x), ref_role->bcn_intvl(%d), ref_role->duration(%d)\n",
(u32)(start_tsf >> 32), (u32)start_tsf, (u32)(tsf >> 32),
(u32)tsf, ref_role->bcn_intvl, ref_role->policy.dur_info.dur);
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
void _mcc_set_2_clients_worsecase_default_pattern(struct rtw_phl_mcc_pattern *m_pattern,
u16 dur_ref)
{
m_pattern->toa_r = CLIENTS_WORSECASE_REF_TOA;
m_pattern->tob_r = dur_ref - m_pattern->toa_r;
_mcc_fill_slot_info(m_pattern, false,
m_pattern->role_ref->policy.dur_info.dur,
m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false,
m_pattern->role_ano->policy.dur_info.dur,
m_pattern->role_ano);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_set_2_clients_worsecase_default_pattern(): tob_r(%d), toa_r(%d)\n",
m_pattern->tob_r, m_pattern->toa_r);
}
/*
* get bcn offset for 2 wifi slot and 1 bt slot for pattern1
* Pattern:
* | Dur_r | Dur_a | Bt slot |
* bcn bcn
* | tob_r | toa_r|tob_a | toa_a| Bt slot |
*/
s16 _mcc_get_offset_for_2_wslot_1_btslot_p1(s16 dur_r,
s16 tob_r, s16 tob_a)
{
return dur_r - tob_r + tob_a;
}
/*
* get bcn offset for 2 wifi slot and 1 bt slot for pattern2
* Pattern:
* | Dur_r | Bt slot | Dur_a |
* bcn bcn
* | tob_r | toa_r| Bt slot |tob_a | toa_a|
*/
s16 _mcc_get_offset_for_2_wslot_1_btslot_p2(s16 dur_r, s16 bt_dur,
s16 tob_r, s16 tob_a)
{
return dur_r - tob_r + bt_dur + tob_a;
}
void _mcc_get_offset_range_for_2wslot_1btslot_p1(s16 ref_dur, s16 ano_dur,
s16 *bcn_min, s16 *bcn_max)
{
s16 min1 = 0, max1 = 0;
*bcn_min = _mcc_get_offset_for_2_wslot_1_btslot_p1(ref_dur, EARLY_RX_BCN_T,
EARLY_RX_BCN_T);
*bcn_max = _mcc_get_offset_for_2_wslot_1_btslot_p1(ref_dur, EARLY_RX_BCN_T,
ano_dur - MIN_RX_BCN_T);
min1 = _mcc_get_offset_for_2_wslot_1_btslot_p1(ref_dur,
ref_dur - MIN_RX_BCN_T, EARLY_RX_BCN_T);
max1 = _mcc_get_offset_for_2_wslot_1_btslot_p1(ref_dur,
ref_dur - MIN_RX_BCN_T, ano_dur - MIN_RX_BCN_T);
if (min1 < *bcn_min)
*bcn_min = min1;
if (max1 > *bcn_max)
*bcn_max = max1;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_get_offset_range_for_2wslot_1btslot_p1(): min(%d), max(%d)\n",
*bcn_min, *bcn_max);
}
void _mcc_get_offset_range_for_2wslot_1btslot_p2(s16 ref_dur, s16 ano_dur,
s16 bt_dur, s16 *bcn_min, s16 *bcn_max)
{
s16 min1 = 0, max1 = 0;
*bcn_min = _mcc_get_offset_for_2_wslot_1_btslot_p2(ref_dur, bt_dur,
EARLY_RX_BCN_T, EARLY_RX_BCN_T);
*bcn_max = _mcc_get_offset_for_2_wslot_1_btslot_p2(ref_dur, bt_dur,
EARLY_RX_BCN_T, ano_dur - MIN_RX_BCN_T);
min1 = _mcc_get_offset_for_2_wslot_1_btslot_p2(ref_dur, bt_dur,
ref_dur - MIN_RX_BCN_T, EARLY_RX_BCN_T);
max1 = _mcc_get_offset_for_2_wslot_1_btslot_p2(ref_dur, bt_dur,
ref_dur - MIN_RX_BCN_T, ano_dur - MIN_RX_BCN_T);
if (min1 < *bcn_min)
*bcn_min = min1;
if (max1 > *bcn_max)
*bcn_max = max1;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_get_offset_range_for_2wslot_1btslot_p2(): min(%d), max(%d)\n",
*bcn_min, *bcn_max);
}
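/*
 * Worst-case (2 clients) beacon-offset helpers: the offset candidate is
 * toa_ref + tob_ano + dur_r + dur_a - 2 * ref_bcn_intvl, and the range helper
 * below evaluates it at the extreme toa_ref/tob_ano values to obtain the
 * min/max offsets that still leave room to receive both beacons.
 */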
s16 _mcc_get_offset_for_2_clients_worsecase(s16 ref_dur, s16 ano_dur,
u16 ref_bcn_intvl, s16 toa_ref, s16 tob_ano)
{
return toa_ref + tob_ano + ref_dur + ano_dur - (2 * ref_bcn_intvl);
}
void _mcc_get_offset_range_for_2_clients_worsecase(s16 ref_dur, s16 ano_dur,
u16 ref_bcn_intvl, s16 *bcn_min, s16 *bcn_max)
{
s16 min1 = 0, min2 = 0, max1 = 0, max2 = 0;
min1 = _mcc_get_offset_for_2_clients_worsecase(ref_dur, ano_dur,
ref_bcn_intvl, MIN_RX_BCN_T, EARLY_RX_BCN_T);
max1 = _mcc_get_offset_for_2_clients_worsecase(ref_dur, ano_dur,
ref_bcn_intvl, MIN_RX_BCN_T, ano_dur -
MIN_RX_BCN_T);
min2 = _mcc_get_offset_for_2_clients_worsecase(ref_dur, ano_dur,
ref_bcn_intvl, ref_dur - EARLY_RX_BCN_T,
EARLY_RX_BCN_T);
max2 = _mcc_get_offset_for_2_clients_worsecase(ref_dur, ano_dur,
ref_bcn_intvl, ref_dur -EARLY_RX_BCN_T,
ano_dur - MIN_RX_BCN_T);
if (min1 < min2)
*bcn_min = min1;
else
*bcn_min = min2;
if (max1 > max2)
*bcn_max = max1;
else
*bcn_max = max2;
}
/*
* copy from _mcc_calc_2wslot_1btslot_nego_p1
* | Wifi slot1 | Bt slot | Wifi slot2 |
*/
bool _mcc_calc_2wslot_1btslot_nego_p2(struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur, s16 bt_dur,
s16 offset, struct rtw_phl_mcc_pattern *m_pattern)
{
bool cal_ok = false;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0;
if ((ref_dur->dur_limit.enable) && (ano_dur->dur_limit.enable)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2wslot_1btslot_nego_p2(): not support, both enable slot limitation\n");
/*todo*/
goto exit;
}
if (ref_dur->dur_limit.enable) {
tob_r = ref_dur->dur / 2;
toa_r = ref_dur->dur - tob_r;
if (tob_r > ref_dur->dur_limit.max_tob) {
tob_r = ref_dur->dur_limit.max_tob;
toa_r = ref_dur->dur - tob_r;
}
if (toa_r > ref_dur->dur_limit.max_toa) {
toa_r = ref_dur->dur_limit.max_toa;
tob_r = ref_dur->dur - toa_r;
}
if ((tob_r > ref_dur->dur_limit.max_tob) ||
(toa_r > ref_dur->dur_limit.max_toa)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2wslot_1btslot_nego_p2(): After adjust tob_r(%d) > max_tob(%d) or toa_r(%d) > max_toa(%d)\n",
tob_r, ref_dur->dur_limit.max_tob,
toa_r, ref_dur->dur_limit.max_toa);
goto exit;
}
tob_a = offset - toa_r - bt_dur;
toa_a = ano_dur->dur - tob_a;
if (tob_a <= 0 || toa_a <= 0) {
m_pattern->courtesy_i.c_en= true;
m_pattern->courtesy_i.c_num = 3;
m_pattern->courtesy_i.src_role = m_pattern->role_ref;
m_pattern->courtesy_i.tgt_role = m_pattern->role_ano;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2wslot_1btslot_nego_p2(): Limi by ref dur, courtesy_info: c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
m_pattern->courtesy_i.c_en,
m_pattern->courtesy_i.c_num,
m_pattern->courtesy_i.src_role->macid,
m_pattern->courtesy_i.tgt_role->macid);
}
goto fill_pattern;
} else if (ano_dur->dur_limit.enable) {
tob_a = ano_dur->dur / 2;
toa_a = ano_dur->dur - tob_a;
if (tob_a > ano_dur->dur_limit.max_tob) {
tob_a = ano_dur->dur_limit.max_tob;
toa_a = ano_dur->dur - tob_a;
}
if (toa_a > ano_dur->dur_limit.max_toa) {
toa_a = ano_dur->dur_limit.max_toa;
tob_a = ano_dur->dur - toa_a;
}
if ((tob_a > ano_dur->dur_limit.max_tob) ||
(toa_a > ano_dur->dur_limit.max_toa)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2wslot_1btslot_nego_p2(): After adjust tob_r(%d) > max_tob(%d) or toa_r(%d) > max_toa(%d)\n",
tob_a, ano_dur->dur_limit.max_tob,
toa_a, ano_dur->dur_limit.max_toa);
goto exit;
}
toa_r = offset - tob_a - bt_dur;
tob_r = ref_dur->dur - toa_r;
if (toa_r <= 0 || tob_r <= 0) {
m_pattern->courtesy_i.c_en= true;
m_pattern->courtesy_i.c_num = 3;
m_pattern->courtesy_i.src_role = m_pattern->role_ano;
m_pattern->courtesy_i.tgt_role = m_pattern->role_ref;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2wslot_1btslot_nego_p2(): Limi by ano dur, courtesy_info: c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
m_pattern->courtesy_i.c_en,
m_pattern->courtesy_i.c_num,
m_pattern->courtesy_i.src_role->macid,
m_pattern->courtesy_i.tgt_role->macid);
}
goto fill_pattern;
} else {
tob_r = ref_dur->dur / 2;
toa_r = ref_dur->dur - tob_r;
tob_a = offset - toa_r - bt_dur;
toa_a = ano_dur->dur - tob_a;
if (tob_a <= 0 || toa_a <= 0) {
m_pattern->courtesy_i.c_en= true;
m_pattern->courtesy_i.c_num = 3;
m_pattern->courtesy_i.src_role = m_pattern->role_ref;
m_pattern->courtesy_i.tgt_role = m_pattern->role_ano;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2wslot_1btslot_nego_p2(): Limi by ref dur, courtesy_info: c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
m_pattern->courtesy_i.c_en,
m_pattern->courtesy_i.c_num,
m_pattern->courtesy_i.src_role->macid,
m_pattern->courtesy_i.tgt_role->macid);
}
goto fill_pattern;
}
fill_pattern:
cal_ok = true;
m_pattern->tob_r = tob_r;
m_pattern->toa_r = toa_r;
m_pattern->tob_a = tob_a;
m_pattern->toa_a = toa_a;
_mcc_fill_slot_info(m_pattern, false, ref_dur->dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, true, bt_dur, NULL);
_mcc_fill_slot_info(m_pattern, false, ano_dur->dur, m_pattern->role_ano);
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2wslot_1btslot_nego_p2(): calc nego patter ok(%d)\n",
cal_ok);
return cal_ok;
}
/*
* copy from _mcc_calc_2_wrole_nego_pattern
* | Wifi slot1 | Wifi slot2 | Bt slot |
* | Dur_r | Dur_a | Bt slot |
* bcn bcn
* | tob_r | toa_r|tob_a | toa_a| Bt slot |
*/
bool _mcc_calc_2wslot_1btslot_nego_p1(struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur, s16 offset,
s16 bt_dur, struct rtw_phl_mcc_pattern *m_pattern)
{
bool cal_ok = false;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0;
if ((ref_dur->dur_limit.enable) && (ano_dur->dur_limit.enable)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2wslot_1btslot_nego_p1(): not support, both enable slot limitation\n");
/*todo*/
goto exit;
}
if (ref_dur->dur_limit.enable) {
tob_r = ref_dur->dur / 2;
toa_r = ref_dur->dur - tob_r;
if (tob_r > ref_dur->dur_limit.max_tob) {
tob_r = ref_dur->dur_limit.max_tob;
toa_r = ref_dur->dur - tob_r;
}
if (toa_r > ref_dur->dur_limit.max_toa) {
toa_r = ref_dur->dur_limit.max_toa;
tob_r = ref_dur->dur - toa_r;
}
if ((tob_r > ref_dur->dur_limit.max_tob) ||
(toa_r > ref_dur->dur_limit.max_toa)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2wslot_1btslot_nego_p1(): After adjust tob_r(%d) > max_tob(%d) or toa_r(%d) > max_toa(%d)\n",
tob_r, ref_dur->dur_limit.max_tob,
toa_r, ref_dur->dur_limit.max_toa);
goto exit;
}
tob_a = offset - toa_r;
toa_a = ano_dur->dur - tob_a;
if (tob_a <= 0 || toa_a <= 0) {
m_pattern->courtesy_i.c_en= true;
m_pattern->courtesy_i.c_num = 3;
m_pattern->courtesy_i.src_role = m_pattern->role_ref;
m_pattern->courtesy_i.tgt_role = m_pattern->role_ano;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2wslot_1btslot_nego_p1(): Limi by ref dur, courtesy_info: c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
m_pattern->courtesy_i.c_en,
m_pattern->courtesy_i.c_num,
m_pattern->courtesy_i.src_role->macid,
m_pattern->courtesy_i.tgt_role->macid);
}
goto fill_pattern;
} else if (ano_dur->dur_limit.enable) {
tob_a = ano_dur->dur / 2;
toa_a = ano_dur->dur - tob_a;
if (tob_a > ano_dur->dur_limit.max_tob) {
tob_a = ano_dur->dur_limit.max_tob;
toa_a = ano_dur->dur - tob_a;
}
if (toa_a > ano_dur->dur_limit.max_toa) {
toa_a = ano_dur->dur_limit.max_toa;
tob_a = ano_dur->dur - toa_a;
}
if ((tob_a > ano_dur->dur_limit.max_tob) ||
(toa_a > ano_dur->dur_limit.max_toa)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2wslot_1btslot_nego_p1(): After adjust tob_r(%d) > max_tob(%d) or toa_r(%d) > max_toa(%d)\n",
tob_a, ano_dur->dur_limit.max_tob,
toa_a, ano_dur->dur_limit.max_toa);
goto exit;
}
toa_r = offset - tob_a;
tob_r = ref_dur->dur - toa_r;
if (toa_r <= 0 || tob_r <= 0) {
m_pattern->courtesy_i.c_en= true;
m_pattern->courtesy_i.c_num = 3;
m_pattern->courtesy_i.src_role = m_pattern->role_ano;
m_pattern->courtesy_i.tgt_role = m_pattern->role_ref;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2wslot_1btslot_nego_p1(): Limi by ano dur, courtesy_info: c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
m_pattern->courtesy_i.c_en,
m_pattern->courtesy_i.c_num,
m_pattern->courtesy_i.src_role->macid,
m_pattern->courtesy_i.tgt_role->macid);
}
goto fill_pattern;
} else {
tob_r = ref_dur->dur / 2;
toa_r = ref_dur->dur - tob_r;
tob_a = offset - toa_r;
toa_a = ano_dur->dur - tob_a;
if (tob_a <= 0 || toa_a <= 0) {
m_pattern->courtesy_i.c_en= true;
m_pattern->courtesy_i.c_num = 3;
m_pattern->courtesy_i.src_role = m_pattern->role_ref;
m_pattern->courtesy_i.tgt_role = m_pattern->role_ano;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2wslot_1btslot_nego_p1(): courtesy_info: c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
m_pattern->courtesy_i.c_en,
m_pattern->courtesy_i.c_num,
m_pattern->courtesy_i.src_role->macid,
m_pattern->courtesy_i.tgt_role->macid);
}
goto fill_pattern;
}
fill_pattern:
cal_ok = true;
m_pattern->tob_r = tob_r;
m_pattern->toa_r = toa_r;
m_pattern->tob_a = tob_a;
m_pattern->toa_a = toa_a;
_mcc_fill_slot_info(m_pattern, false, ref_dur->dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false, ano_dur->dur, m_pattern->role_ano);
_mcc_fill_slot_info(m_pattern, true, bt_dur, NULL);
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2wslot_1btslot_nego_p1(): calc nego patter ok(%d)\n",
cal_ok);
return cal_ok;
}
/*
* copy from _mcc_calculate_2_clients_pattern
* Calculate pattern2 for 2 wifi slot and 1 bt slot
* Pattern:
* | Wifi slot1 | Bt slot | Wifi slot2 |
* | Dur_r | Bt slot | Dur_a |
* bcn bcn
* | tob_r | toa_r| Bt slot |tob_a | toa_a|
* @offset: The offset of Bcns
* @m_pattern: pattern info
*/
enum rtw_phl_status _mcc_calculate_2wslot_1btslot_pattern2(
struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur,
u16 offset, s16 bt_dur,
struct rtw_phl_mcc_pattern *m_pattern)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
u16 mcc_intvl = ref_dur->dur + ano_dur->dur;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0, tob_r_cand = 0;
s16 d_r = ref_dur->dur, d_a = ano_dur->dur, bcns_offset = offset;
s16 sum = 0, sum_last = 0;
s16 tob_r_l = ref_dur->dur_limit.max_tob;
s16 toa_r_l = ref_dur->dur_limit.max_toa;
s16 tob_a_l = ano_dur->dur_limit.max_tob;
s16 toa_a_l = ano_dur->dur_limit.max_toa;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): ref_dur(%d), ano_dur(%d), bt_dur(%d), bcns offset(%d)\n",
d_r, d_a, bt_dur, bcns_offset);
for (tob_r = EARLY_RX_BCN_T; tob_r < mcc_intvl; tob_r++) {
toa_r = d_r - tob_r;
if (toa_r < MIN_RX_BCN_T) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): break loop, by toa_r(%d) < MIN_RX_BCN_T(%d)\n",
toa_r, MIN_RX_BCN_T);
break;
}
if (ref_dur->dur_limit.enable) {
if (tob_r > tob_r_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): tob_r(%d) > tob_r_l(%d), Break loop\n",
tob_r, tob_r_l);
break;
} else if (toa_r > toa_r_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): toa_r(%d) > toa_r_l(%d), continue next loop\n",
toa_r, toa_r_l);
continue;
}
}
tob_a = bcns_offset - toa_r - bt_dur;
if (tob_a < EARLY_RX_BCN_T)
continue;
toa_a = d_a - tob_a;
if (toa_a < MIN_RX_BCN_T){
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): break loop, by toa_a(%d) < MIN_RX_BCN_T(%d)\n",
toa_a, MIN_RX_BCN_T);
break;
}
if (ano_dur->dur_limit.enable) {
if (tob_a > tob_a_l || toa_a > toa_a_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): tob_a(%d) > tob_a_l(%d) || toa_a(%d) > toa_a_l(%d), continue next loop\n",
tob_a, tob_a_l, toa_a, toa_a_l);
continue;
}
}
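/* Cost of this split: sum of squared pairwise differences between the four
 * sub-slots. The loop advances tob_r until this sum stops decreasing, i.e. a
 * balanced-split heuristic. */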
sum = ((tob_r - toa_r) * (tob_r - toa_r)) +
((tob_r - tob_a) * (tob_r - tob_a)) +
((tob_r - toa_a) * (tob_r - toa_a)) +
((toa_r - tob_a) * (toa_r - tob_a)) +
((toa_r - toa_a) * (toa_r - toa_a)) +
((tob_a - toa_a) * (tob_a - toa_a));
tob_r_cand = tob_r;
if (sum_last !=0 && sum > sum_last) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): Find the optimal pattern, by get minSum\n");
break;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d), sum_last(%d), sum(%d)\n",
tob_r, toa_r, tob_a, toa_a, sum_last, sum);
sum_last = sum;
}
if (0 == tob_r_cand) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2wslot_1btslot_pattern2(): Can't found suitable pattern, goto calc_nego_pattern\n");
goto exit;
}
tob_r = tob_r_cand;
toa_r = d_r - tob_r;
tob_a = bcns_offset - toa_r;
toa_a = d_a - tob_a;
m_pattern->tob_r = tob_r;
m_pattern->toa_r = toa_r;
m_pattern->tob_a = tob_a;
m_pattern->toa_a = toa_a;
/*Update slot order*/
_mcc_fill_slot_info(m_pattern, false, ref_dur->dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, true, bt_dur, NULL);
_mcc_fill_slot_info(m_pattern, false, ano_dur->dur, m_pattern->role_ano);
status = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern2(): status(%d), tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d)\n",
status, m_pattern->tob_r, m_pattern->toa_r, m_pattern->tob_a,
m_pattern->toa_a);
return status;
}
/*
* Copied from _mcc_calculate_2_clients_pattern.
* Calculate pattern1 for 2 wifi slots and 1 bt slot
* Pattern:
* | Wifi slot1 | Wifi slot2 | Bt slot |
* | Dur_r | Dur_a | Bt slot |
* bcn bcn
* | tob_r | toa_r|tob_a | toa_a| Bt slot |
* @offset: The offset of Bcns
* @m_pattern: pattern info
*/
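/*
* Worked example (illustrative values only): with ref_dur = 60, ano_dur = 40
* and a beacon offset of 50, picking toa_r = 30 gives tob_r = 30,
* tob_a = 50 - 30 = 20 and toa_a = 40 - 20 = 20; the bt slot follows wifi
* slot2 here, so it does not enter the beacon offset equation.
*/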
enum rtw_phl_status _mcc_calculate_2wslot_1btslot_pattern1(
struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur,
u16 offset, s16 bt_dur,
struct rtw_phl_mcc_pattern *m_pattern)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
u16 mcc_intvl = ref_dur->dur + ano_dur->dur;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0, tob_r_cand = 0;
s16 d_r = ref_dur->dur, d_a = ano_dur->dur, bcns_offset = offset;
s16 sum = 0, sum_last = 0;
s16 tob_r_l = ref_dur->dur_limit.max_tob;
s16 toa_r_l = ref_dur->dur_limit.max_toa;
s16 tob_a_l = ano_dur->dur_limit.max_tob;
s16 toa_a_l = ano_dur->dur_limit.max_toa;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): ref_dur(%d), ano_dur(%d), bcns offset(%d)\n",
d_r, d_a, bcns_offset);
for (tob_r = EARLY_RX_BCN_T; tob_r < mcc_intvl; tob_r++) {
toa_r = d_r - tob_r;
if (toa_r < MIN_RX_BCN_T) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): break loop, by toa_r(%d) < MIN_RX_BCN_T(%d)\n",
toa_r, MIN_RX_BCN_T);
break;
}
if (ref_dur->dur_limit.enable) {
if (tob_r > tob_r_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): tob_r(%d) > tob_r_l(%d), Break loop\n",
tob_r, tob_r_l);
break;
} else if (toa_r > toa_r_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): toa_r(%d) > toa_r_l(%d), continue next loop\n",
toa_r, toa_r_l);
continue;
}
}
tob_a = bcns_offset - toa_r;
if (tob_a < EARLY_RX_BCN_T)
continue;
toa_a = d_a - tob_a;
if (toa_a < MIN_RX_BCN_T){
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): break loop, by toa_a(%d) < MIN_RX_BCN_T(%d)\n",
toa_a, MIN_RX_BCN_T);
break;
}
if (ano_dur->dur_limit.enable) {
if (tob_a > tob_a_l || toa_a > toa_a_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): tob_a(%d) > tob_a_l(%d) || toa_a(%d) > toa_a_l(%d), continue next loop\n",
tob_a, tob_a_l, toa_a, toa_a_l);
continue;
}
}
sum = ((tob_r - toa_r) * (tob_r - toa_r)) +
((tob_r - tob_a) * (tob_r - tob_a)) +
((tob_r - toa_a) * (tob_r - toa_a)) +
((toa_r - tob_a) * (toa_r - tob_a)) +
((toa_r - toa_a) * (toa_r - toa_a)) +
((tob_a - toa_a) * (tob_a - toa_a));
tob_r_cand = tob_r;
if (sum_last !=0 && sum > sum_last) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): Find the optimal pattern, by get minSum\n");
break;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d), sum_last(%d), sum(%d)\n",
tob_r, toa_r, tob_a, toa_a, sum_last, sum);
sum_last = sum;
}
if (0 == tob_r_cand) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2wslot_1btslot_pattern1(): Can't found suitable pattern, goto calc_nego_pattern\n");
goto exit;
}
tob_r = tob_r_cand;
toa_r = d_r - tob_r;
tob_a = bcns_offset - toa_r;
toa_a = d_a - tob_a;
m_pattern->tob_r = tob_r;
m_pattern->toa_r = toa_r;
m_pattern->tob_a = tob_a;
m_pattern->toa_a = toa_a;
_mcc_fill_slot_info(m_pattern, false, ref_dur->dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false, ano_dur->dur, m_pattern->role_ano);
_mcc_fill_slot_info(m_pattern, true, bt_dur, NULL);
status = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2wslot_1btslot_pattern1(): status(%d), tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d)\n",
status, m_pattern->tob_r, m_pattern->toa_r, m_pattern->tob_a,
m_pattern->toa_a);
return status;
}
/*
* Calculate the pattern for 2 wifi slots and 1 bt slot
*/
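/*
* Try pattern1 first (bt slot after both wifi slots), then pattern2 (bt slot
* between the wifi slots), depending on which offset range the current beacon
* offset falls into; if neither fits, fall back to the negotiated patterns
* nego_p1/nego_p2.
*/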
enum rtw_phl_status _mcc_calculate_2wslot_1btslot_pattern(
struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur,
s16 bt_dur, struct rtw_phl_mcc_pattern *m_pattern)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
s16 offset_min = 0, offset_max = 0;
u16 offset = m_pattern->bcns_offset;
_mcc_get_offset_range_for_2wslot_1btslot_p1(ref_dur->dur, ano_dur->dur,
&offset_min, &offset_max);
if (offset >= offset_min && offset <= offset_max) {
if (RTW_PHL_STATUS_SUCCESS !=
_mcc_calculate_2wslot_1btslot_pattern1(ref_dur, ano_dur,
offset, bt_dur, m_pattern)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2wslot_1btslot_pattern(): Pattern1, Can't found suitable pattern, goto calc_nego_pattern\n");
goto calc_nego_pattern;
} else {
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
}
}
_mcc_get_offset_range_for_2wslot_1btslot_p2(ref_dur->dur, ano_dur->dur,
bt_dur, &offset_min, &offset_max);
if (offset >= offset_min && offset <= offset_max) {
if (RTW_PHL_STATUS_SUCCESS !=
_mcc_calculate_2wslot_1btslot_pattern2(ref_dur, ano_dur,
offset, bt_dur, m_pattern)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2wslot_1btslot_pattern(): Pattern2, Can't found suitable pattern, goto calc_nego_pattern\n");
goto calc_nego_pattern;
} else {
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
}
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2wslot_1btslot_pattern(): worsecase, goto calc_nego_pattern\n");
goto calc_nego_pattern;
}
calc_nego_pattern:
if (_mcc_calc_2wslot_1btslot_nego_p1(ref_dur,
ano_dur, offset, bt_dur, m_pattern)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2wslot_1btslot_pattern(): calc_nego_pattern1 ok\n");
} else if (_mcc_calc_2wslot_1btslot_nego_p2(ref_dur,
ano_dur, offset, bt_dur, m_pattern)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2wslot_1btslot_pattern(): calc_nego_pattern2 ok\n");
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calculate_2wslot_1btslot_pattern(): calc_nego_pattern fail\n");
goto exit;
}
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
/*
* |Wifi1 slot| Wifi2 slot|
* < 150TU >< 150TU >
* | Dur_r | Dur_a | Dur_r | Dur_a |
* bcn_r bcn_a
* | tob_r | toa_r| |tob_a | toa_a|
*/
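/*
* Note: this path only handles ref_bcn_intvl == HANDLE_BCN_INTVL (checked
* below). Because the worst-case TDMA interval (mcc_intvl) no longer matches
* the beacon interval, the role_ano beacon that lands in the second wifi2
* slot is the one arriving bcns_offset + 2 * ref_bcn_intvl after the
* reference beacon, hence tob_a = bcns_offset + 2 * ref_bcn_intvl - toa_r -
* mcc_intvl in the loop below.
*/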
enum rtw_phl_status _mcc_calc_2_clients_worsecase_pattern(u16 ref_dur,
u16 ano_dur, u16 offset, u16 ref_bcn_intvl,
struct rtw_phl_mcc_pattern *m_pattern)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
u16 mcc_intvl = ref_dur + ano_dur;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0;
s16 d_r = ref_dur, d_a = ano_dur, bcns_offset = offset;
s16 sum = 0, sum_last = 0, offset_min = 0, offset_max = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): ref_dur(%d), ano_dur(%d), bcns offset(%d), ref_bcn_intvl(%d)\n",
ref_dur, ano_dur, offset, ref_bcn_intvl);
if (ref_bcn_intvl != HANDLE_BCN_INTVL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2_clients_worsecase_pattern(): ref_bcn_intvl(%d) != HANDLE_BCN_INTVL(%d), now, we can't calculate the pattern\n",
ref_bcn_intvl, HANDLE_BCN_INTVL);
goto exit;
}
_mcc_get_offset_range_for_2_clients_worsecase(d_r, d_a, ref_bcn_intvl,
&offset_min, &offset_max);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): we can calculate the range of bcn offset is %d~%d\n",
offset_min, offset_max);
if ((bcns_offset >= offset_min) && (bcns_offset <= offset_max))
goto calc;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): transform bcn offset from %d to %d\n",
bcns_offset, ref_bcn_intvl - bcns_offset);
/*bcn offset = 85, we can transform to -15*/
bcns_offset = ref_bcn_intvl - bcns_offset;
if (bcns_offset >= offset_min && bcns_offset <= offset_max) {
goto calc;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calculate_2_clients_worsecase_pattern(): bcn offset out of range, we can't calculate it\n");
goto exit;
}
calc:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): Start calculate\n");
for (tob_r = EARLY_RX_BCN_T; tob_r < mcc_intvl; tob_r++) {
toa_r = d_r - tob_r;
if (toa_r < MIN_RX_BCN_T) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): Find the optimal pattern, by toa_r(%d) < MIN_RX_BCN_T(%d)\n",
toa_r, MIN_RX_BCN_T);
break;
}
tob_a = bcns_offset + 2 * ref_bcn_intvl - toa_r - mcc_intvl;
if (tob_a < EARLY_RX_BCN_T)
continue;
toa_a = d_a - tob_a;
if (toa_a < MIN_RX_BCN_T){
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): Find the optimal pattern, by toa_a(%d) < MIN_RX_BCN_T(%d)\n",
toa_a, MIN_RX_BCN_T);
break;
}
sum = ((tob_r - toa_r) * (tob_r - toa_r)) +
((tob_r - tob_a) * (tob_r - tob_a)) +
((tob_r - toa_a) * (tob_r - toa_a)) +
((toa_r - tob_a) * (toa_r - tob_a)) +
((toa_r - toa_a) * (toa_r - toa_a)) +
((tob_a - toa_a) * (tob_a - toa_a));
if (sum_last !=0 && sum > sum_last) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): Find the optimal pattern, by get minSum\n");
break;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d), sum_last(%d), sum(%d)\n",
tob_r, toa_r, tob_a, toa_a, sum_last, sum);
sum_last = sum;
}
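/*
* The loop exits one step past the candidate it wants, so step back one TU
* and rederive the remaining values from that tob_r.
*/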
tob_r = tob_r - 1;
toa_r = d_r - tob_r;
tob_a = bcns_offset + 2 * ref_bcn_intvl - toa_r - mcc_intvl;
toa_a = d_a - tob_a;
m_pattern->tob_r = (u8)tob_r;
m_pattern->toa_r = (u8)toa_r;
m_pattern->tob_a = (u8)tob_a;
m_pattern->toa_a = (u8)toa_a;
_mcc_fill_slot_info(m_pattern, false, ref_dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false, ano_dur, m_pattern->role_ano);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_worsecase_pattern(): Result, tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d)\n",
tob_r, toa_r, tob_a, toa_a);
status = RTW_PHL_STATUS_SUCCESS;
exit:
if (status != RTW_PHL_STATUS_SUCCESS)
m_pattern->calc_fail = true;
return status;
}
#if 0
void _mcc_fill_bt_slot(struct phl_mcc_info *minfo,
struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur)
{
struct rtw_phl_mcc_bt_info *bt_info = &minfo->bt_info;
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_pattern *m_pattern = &en_info->m_pattern;
u8 i = 0;
s16 spacing = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_fill_bt_slot()\n");
if (false == bt_info->add_bt_role)
goto exit;
spacing = m_pattern->bcns_offset - m_pattern->toa_r - m_pattern->tob_a;
if (0 < spacing) {
if (bt_info->bt_dur < spacing) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_bt_slot(): bt_info->bt_dur(%d) < spacing(%d), adjust BT dur to (%d), please check code\n",
bt_info->bt_dur, spacing, spacing);
bt_info->bt_dur = spacing;
bt_info->bt_seg[0] = bt_info->bt_dur;
bt_info->bt_seg_num = 1;
goto exit;
}
bt_info->bt_seg[0] = spacing;
bt_info->bt_seg[1] = bt_info->bt_dur - bt_info->bt_seg[0];
bt_info->bt_seg_num = 2;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_bt_slot(): Segment Bt dur to Seg1(%d), Seg2(%d)\n",
bt_info->bt_seg[0], bt_info->bt_seg[1]);
} else if (0 == spacing){
bt_info->bt_seg[0] = bt_info->bt_dur;
bt_info->bt_seg_num = 1;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_bt_slot(): spacing(%d) < 0, please check code,\n",
spacing);
}
exit:
_mcc_dump_bt_ino(bt_info);
return;
}
#endif
/*
* |Wifi1 slot|Wifi2 slot|
*/
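/*
* Negotiated pattern: exactly one of the two roles carries a slot limitation.
* Its beacon is first placed at the middle of its own slot and then clamped
* to max_tob/max_toa; if the other role's beacon then falls outside its slot,
* courtesy mode is enabled so that the limited role's slot can periodically
* be taken over by the other role (courtesy_i carries the source/target roles
* and the count).
*/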
bool _mcc_calc_2_wrole_nego_pattern(struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur, s16 offset,
struct rtw_phl_mcc_pattern *m_pattern)
{
bool cal_ok = false;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0;
if ((!ref_dur->dur_limit.enable) && (!ano_dur->dur_limit.enable)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2_wrole_nego_pattern(): not support, both not enable slot limitation\n");
/*todo*/
goto exit;
}
if ((ref_dur->dur_limit.enable) && (ano_dur->dur_limit.enable)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2_wrole_nego_pattern(): not support, both enable slot limitation\n");
/*todo*/
goto exit;
}
if ((ref_dur->dur_limit.enable) && (!ano_dur->dur_limit.enable)) {
tob_r = ref_dur->dur / 2;
toa_r = ref_dur->dur - tob_r;
if (tob_r > ref_dur->dur_limit.max_tob) {
tob_r = ref_dur->dur_limit.max_tob;
toa_r = ref_dur->dur - tob_r;
}
if (toa_r > ref_dur->dur_limit.max_toa) {
toa_r = ref_dur->dur_limit.max_toa;
tob_r = ref_dur->dur - toa_r;
}
if ((tob_r > ref_dur->dur_limit.max_tob) ||
(toa_r > ref_dur->dur_limit.max_toa)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2_wrole_nego_pattern(): After adjust tob_r(%d) > max_tob(%d) or toa_r(%d) > max_toa(%d)\n",
tob_r, ref_dur->dur_limit.max_tob,
toa_r, ref_dur->dur_limit.max_toa);
goto exit;
}
tob_a = offset - toa_r;
toa_a = ano_dur->dur - tob_a;
if (tob_a <= 0 || toa_a <= 0) {
m_pattern->courtesy_i.c_en = true;
m_pattern->courtesy_i.c_num = 3;
m_pattern->courtesy_i.src_role = m_pattern->role_ref;
m_pattern->courtesy_i.tgt_role = m_pattern->role_ano;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2_wrole_nego_pattern(): Limi by ref dur, courtesy_info: c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
m_pattern->courtesy_i.c_en,
m_pattern->courtesy_i.c_num,
m_pattern->courtesy_i.src_role->macid,
m_pattern->courtesy_i.tgt_role->macid);
}
goto fill_pattern;
}
if ((!ref_dur->dur_limit.enable) && (ano_dur->dur_limit.enable)) {
tob_a = ano_dur->dur / 2;
toa_a = ano_dur->dur - tob_a;
if (tob_a > ano_dur->dur_limit.max_tob) {
tob_a = ano_dur->dur_limit.max_tob;
toa_a = ano_dur->dur - tob_a;
}
if (toa_a > ano_dur->dur_limit.max_toa) {
toa_a = ano_dur->dur_limit.max_toa;
tob_a = ano_dur->dur - toa_a;
}
if ((tob_a > ano_dur->dur_limit.max_tob) ||
(toa_a > ano_dur->dur_limit.max_toa)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2_wrole_nego_pattern(): After adjust tob_r(%d) > max_tob(%d) or toa_r(%d) > max_toa(%d)\n",
tob_a, ano_dur->dur_limit.max_tob,
toa_a, ano_dur->dur_limit.max_toa);
goto exit;
}
toa_r = offset - tob_a;
tob_r = ref_dur->dur - toa_r;
if (toa_r <= 0 || tob_r <= 0) {
m_pattern->courtesy_i.c_en = true;
m_pattern->courtesy_i.c_num = 3;
m_pattern->courtesy_i.src_role = m_pattern->role_ano;
m_pattern->courtesy_i.tgt_role = m_pattern->role_ref;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2_wrole_nego_pattern(): Limi by ano dur, courtesy_info: c_en(%d), c_num(%d), src_role->macid(0x%x), tgt_role->macid(0x%x)\n",
m_pattern->courtesy_i.c_en,
m_pattern->courtesy_i.c_num,
m_pattern->courtesy_i.src_role->macid,
m_pattern->courtesy_i.tgt_role->macid);
}
goto fill_pattern;
}
fill_pattern:
cal_ok = true;
m_pattern->tob_r = tob_r;
m_pattern->toa_r = toa_r;
m_pattern->tob_a = tob_a;
m_pattern->toa_a = toa_a;
_mcc_fill_slot_info(m_pattern, false, ref_dur->dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false, ano_dur->dur, m_pattern->role_ano);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wrole_nego_pattern(): calc nego patter\n");
exit:
return cal_ok;
}
#if 0
/**
* Calculate the optimal pattern for 2wrole MCC with limitation of time slot v2
* @ref_dur: Duration info of reference ch
* @ano_dur: Duration info of another ch
* @offset: The offset between beacon of ref_role and beacon of ano_role
* @m_pattern: mcc pattern.
**/
enum rtw_phl_status _mcc_calculate_2_wrole_pattern_v2(
struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur,
u16 offset,
struct rtw_phl_mcc_pattern *m_pattern)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0;
s16 tob_r_cand = 0, toa_r_cand = 0, tob_a_cand = 0, toa_a_cand = 0;
s16 d_r = ref_dur->dur, d_a = ano_dur->dur, bcns_offset = offset;
s16 sum = 0, sum_last = 0;
s16 tob_r_min = _mcc_is_ap_category(m_pattern->role_ref->wrole) ?
EARLY_TX_BCN_T : EARLY_RX_BCN_T;
s16 toa_r_min = _mcc_is_ap_category(m_pattern->role_ref->wrole) ?
MIN_TX_BCN_T : MIN_RX_BCN_T;
s16 tob_a_min = _mcc_is_ap_category(m_pattern->role_ano->wrole) ?
EARLY_TX_BCN_T : EARLY_RX_BCN_T;
s16 toa_a_min = _mcc_is_ap_category(m_pattern->role_ano->wrole) ?
MIN_TX_BCN_T : MIN_RX_BCN_T;
s16 min_bcns_offset = toa_r_min + tob_a_min;
s16 i = 0, cnt = 0;
bool bdry_r = false, bdry_a = false; /*reach boundary edge*/
if ((bcns_offset > (m_pattern->role_ref->bcn_intvl - min_bcns_offset))
|| (bcns_offset < min_bcns_offset)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2_wrole_pattern_v2(): bcns_offset(%d) > max_offset(%d) || bcns_offset < min_offset(%d), goto calc_nego_pattern\n",
bcns_offset,
(m_pattern->role_ref->bcn_intvl - min_bcns_offset),
min_bcns_offset);
goto calc_nego_pattern;
}
cnt = bcns_offset - toa_r_min - tob_a_min;
toa_r = toa_r_min;
tob_a = tob_a_min;
for (i = 0; i < cnt; i++) {
if ((true == bdry_r) && (true == bdry_a)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2_wrole_pattern_v2(): braek loop by (true == bdry_r) && (true == bdry_a)\n");
break;
}
if (true == bdry_r)
goto calc_ano;
if (i > 0)
toa_r++;
tob_r = d_r - toa_r;
if (tob_r < tob_r_min) {
bdry_r = true;
goto calc_ano;
}
if (!ref_dur->dur_limit.enable)
goto check_conflict1;
if ((tob_r > ref_dur->dur_limit.max_tob) ||
(toa_r > ref_dur->dur_limit.max_toa)) {
goto calc_ano;
}
check_conflict1:
if ((toa_r + tob_a_cand) > bcns_offset) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2_wrole_pattern_v2(): braek loop by conflict(toa_r(%d) + tob_a_cand(%d)) > bcns_offset(%d)\n",
toa_r, tob_a_cand, bcns_offset);
break;
}
toa_r_cand = toa_r;
calc_ano:
if (true == bdry_a)
continue;
if (i > 0)
tob_a++;
toa_a = d_a - tob_a;
if (toa_a < toa_a_min) {
bdry_a = true;
continue;
}
if (!ano_dur->dur_limit.enable)
goto check_conflict2;
if ((tob_a > ano_dur->dur_limit.max_tob) ||
(toa_a > ano_dur->dur_limit.max_toa)) {
continue;
}
check_conflict2:
if ((bcns_offset - (tob_a + toa_r_cand)) >
(m_pattern->d_r_d_a_spacing_max)) {
continue;
}
if ((tob_a + toa_r_cand) > bcns_offset) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2_wrole_pattern_v2(): braek loop by conflict(tob_a(%d) + toa_r_cand(%d)) > bcns_offset(%d)\n",
tob_a, toa_r_cand, bcns_offset);
break;
}
tob_a_cand = tob_a;
/*calculate candidate result*/
tob_r_cand = d_r - toa_r_cand;
toa_a_cand = d_a - tob_a_cand;
sum = ((tob_r_cand - toa_r_cand) * (tob_r_cand - toa_r_cand)) +
((tob_r_cand - tob_a_cand) * (tob_r_cand - tob_a_cand)) +
((tob_r_cand - toa_a_cand) * (tob_r_cand - toa_a_cand)) +
((toa_r_cand - tob_a_cand) * (toa_r_cand - tob_a_cand)) +
((toa_r_cand - toa_a_cand) * (toa_r_cand - toa_a_cand)) +
((tob_a_cand - toa_a_cand) * (tob_a_cand - toa_a_cand));
if (sum_last !=0 && sum > sum_last) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern_v2(): Find the optimal pattern, by get minSum\n");
break;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern_v2(): tob_r_cand(%d), toa_r_cand(%d), tob_a_cand(%d), toa_a_cand(%d), sum_last(%d), sum(%d)\n",
tob_r_cand, toa_r_cand, tob_a_cand, toa_a_cand,
sum_last, sum);
sum_last = sum;
}
if ((0 == toa_r_cand) || (0 == tob_a_cand)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2_wrole_pattern_v2(): Can't found suitable pattern, goto calc_nego_pattern\n");
goto calc_nego_pattern;
}
m_pattern->tob_r = tob_r_cand;
m_pattern->toa_r = toa_r_cand;
m_pattern->tob_a = tob_a_cand;
m_pattern->toa_a = toa_a_cand;
_mcc_fill_slot_info(m_pattern, false, ref_dur->dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false, ano_dur->dur, m_pattern->role_ano);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
calc_nego_pattern:
if (_mcc_calc_2_wrole_nego_pattern(ref_dur, ano_dur, bcns_offset,
m_pattern)) {
status = RTW_PHL_STATUS_SUCCESS;
}
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern_v2(): m_pattern: tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d)\n",
m_pattern->tob_r, m_pattern->toa_r, m_pattern->tob_a,
m_pattern->toa_a);
return status;
}
#endif
#if 0
/**
* Copied from _mcc_calc_2_clients_worsecase_pattern, with time slot limitation added
* worsecase: TDMA interval is 150 TU
* Calculate the optimal pattern for 2 wifi slots with time slot limitation in the worsecase
* @ref_dur: Duration info of reference slot
* @ano_dur: Duration info of another slot
* @offset: The offset between beacon of ref_role and beacon of ano_role
* @ref_bcn_intvl: Bcn interval of reference role
* @m_pattern: mcc pattern.
**/
enum rtw_phl_status _mcc_calc_2_wifi_slot_worsecase_pattern(
struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur, u16 offset,
u16 ref_bcn_intvl, struct rtw_phl_mcc_pattern *m_pattern)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
u16 mcc_intvl = ref_dur->dur + ano_dur->dur;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0, tob_r_cand = 0;
s16 d_r = ref_dur->dur, d_a = ano_dur->dur, bcns_offset = offset;
s16 sum = 0, sum_last = 0, offset_min = 0, offset_max = 0;
s16 tob_r_l = ref_dur->dur_limit.max_tob;
s16 toa_r_l = ref_dur->dur_limit.max_toa;
s16 tob_a_l = ano_dur->dur_limit.max_tob;
s16 toa_a_l = ano_dur->dur_limit.max_toa;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): ref_dur(%d), ano_dur(%d), bcns offset(%d), ref_bcn_intvl(%d)\n",
d_r, d_a, offset, ref_bcn_intvl);
if (ref_bcn_intvl != HANDLE_BCN_INTVL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): ref_bcn_intvl(%d) != HANDLE_BCN_INTVL(%d), now, we can't calculate the pattern\n",
ref_bcn_intvl, HANDLE_BCN_INTVL);
goto exit;
}
_mcc_get_offset_range_for_2_clients_worsecase(d_r, d_a, ref_bcn_intvl,
&offset_min, &offset_max);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): we can calculate the range of bcn offset is %d~%d\n",
offset_min, offset_max);
if ((bcns_offset >= offset_min) && (bcns_offset <= offset_max))
goto calc;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): transform bcn offset from %d to %d\n",
bcns_offset, ref_bcn_intvl - bcns_offset);
/*bcn offset = 85, we can transform to -15*/
bcns_offset = ref_bcn_intvl - bcns_offset;
if (bcns_offset >= offset_min && bcns_offset <= offset_max) {
goto calc;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): bcn offset out of range, we can't calculate it\n");
goto exit;
}
calc:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): Start calculate\n");
for (tob_r = EARLY_RX_BCN_T; tob_r < mcc_intvl; tob_r++) {
toa_r = d_r - tob_r;
if (toa_r < MIN_RX_BCN_T) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): Break loop, by toa_r(%d) < MIN_RX_BCN_T(%d)\n",
toa_r, MIN_RX_BCN_T);
break;
}
if (ref_dur->dur_limit.enable) {
if (toa_r > toa_r_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): toa_r(%d) > toa_r_l(%d), continue next loop\n",
toa_r, toa_r_l);
continue;
}
if (tob_r > tob_r_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): Break loop, tob_r(%d) > tob_r_l(%d)\n",
tob_r, tob_r_l);
break;
}
}
tob_a = bcns_offset + 2 * ref_bcn_intvl - toa_r - mcc_intvl;
if (tob_a < EARLY_RX_BCN_T)
continue;
toa_a = d_a - tob_a;
if (toa_a < MIN_RX_BCN_T){
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): Break loop, by toa_a(%d) < MIN_RX_BCN_T(%d)\n",
toa_a, MIN_RX_BCN_T);
break;
}
if (ano_dur->dur_limit.enable) {
if (tob_a > tob_a_l || toa_a > toa_a_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): tob_a(%d) > tob_a_l(%d) || toa_a(%d) > toa_a_l(%d), continue next loop\n",
tob_a, tob_a_l, toa_a, toa_a_l);
continue;
}
}
sum = ((tob_r - toa_r) * (tob_r - toa_r)) +
((tob_r - tob_a) * (tob_r - tob_a)) +
((tob_r - toa_a) * (tob_r - toa_a)) +
((toa_r - tob_a) * (toa_r - tob_a)) +
((toa_r - toa_a) * (toa_r - toa_a)) +
((tob_a - toa_a) * (tob_a - toa_a));
tob_r_cand = tob_r;
if (sum_last !=0 && sum > sum_last) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): Find the optimal pattern, by get minSum\n");
break;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d), sum_last(%d), sum(%d)\n",
tob_r, toa_r, tob_a, toa_a, sum_last, sum);
sum_last = sum;
}
if (0 == tob_r_cand) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): Can't found suitable pattern, goto calc_nego_pattern\n");
goto calc_nego_pattern;
}
tob_r = tob_r_cand;
toa_r = d_r - tob_r;
tob_a = bcns_offset + 2 * ref_bcn_intvl - toa_r - mcc_intvl;
toa_a = d_a - tob_a;
m_pattern->tob_r = tob_r;
m_pattern->toa_r = toa_r;
m_pattern->tob_a = tob_a;
m_pattern->toa_a = toa_a;
_mcc_fill_slot_info(m_pattern, false, ref_dur->dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false, ano_dur->dur, m_pattern->role_ano);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): Result, tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d)\n",
tob_r, toa_r, tob_a, toa_a);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
calc_nego_pattern:
if (_mcc_calc_2_wrole_nego_pattern(ref_dur, ano_dur, bcns_offset,
m_pattern))
status = RTW_PHL_STATUS_SUCCESS;
exit:
if (status != RTW_PHL_STATUS_SUCCESS)
m_pattern->calc_fail = true;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calc_2_wifi_slot_worsecase_pattern(): status(%d)\n",
status);
return status;
}
#endif
/*
* Calculate the optimal pattern for 2wrole MCC with limitation of time slot
* @ref_dur: Duration of reference ch
* @ano_dur: Duration of another ch
* @offset: The offset between beacon of client1 and beacon of client2
* @m_pattern: mcc pattern.
* | Wifi1 slot | Wifi2 slot | Wifi1 slot | Wifi2 slot |
* <tob_r> Bcn_r <toa_r> <tob_r> Bcn_r <toa_r>
* <tob_a> Bcn_a <toa_a> <tob_a> Bcn_a <toa_a>
* < bcns_offset >
*/
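/*
* Same search as _mcc_calculate_2_clients_pattern, but each role may carry a
* max_tob/max_toa limit; candidates violating a limit are skipped, and if no
* candidate survives, the search falls back to _mcc_calc_2_wrole_nego_pattern.
*/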
enum rtw_phl_status _mcc_calculate_2_wrole_pattern(
struct rtw_phl_mcc_dur_info *ref_dur,
struct rtw_phl_mcc_dur_info *ano_dur,
u16 offset,
struct rtw_phl_mcc_pattern *m_pattern)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
u16 mcc_intvl = ref_dur->dur + ano_dur->dur;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0, tob_r_cand = 0;
s16 d_r = ref_dur->dur, d_a = ano_dur->dur, bcns_offset = offset;
s16 sum = 0, sum_last = 0;
s16 tob_r_l = ref_dur->dur_limit.max_tob;
s16 toa_r_l = ref_dur->dur_limit.max_toa;
s16 tob_a_l = ano_dur->dur_limit.max_tob;
s16 toa_a_l = ano_dur->dur_limit.max_toa;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern(): ref_dur(%d), ano_dur(%d), bcns offset(%d)\n",
d_r, d_a, bcns_offset);
for (tob_r = EARLY_RX_BCN_T; tob_r < mcc_intvl; tob_r++) {
toa_r = d_r - tob_r;
if (toa_r < MIN_RX_BCN_T) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern(): break loop, by toa_r(%d) < MIN_RX_BCN_T(%d)\n",
toa_r, MIN_RX_BCN_T);
break;
}
if (ref_dur->dur_limit.enable) {
if (tob_r > tob_r_l || toa_r > toa_r_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern(): tob_r(%d) > tob_r_l(%d) || toa_r(%d) > toa_r_l(%d), continue next loop\n",
tob_r, tob_r_l, toa_r, toa_r_l);
continue;
}
}
tob_a = bcns_offset - toa_r;
if (tob_a < EARLY_RX_BCN_T)
continue;
toa_a = d_a - tob_a;
if (toa_a < MIN_RX_BCN_T){
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern(): break loop, by toa_a(%d) < MIN_RX_BCN_T(%d)\n",
toa_a, MIN_RX_BCN_T);
break;
}
if (ano_dur->dur_limit.enable) {
if (tob_a > tob_a_l || toa_a > toa_a_l) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern(): tob_a(%d) > tob_a_l(%d) || toa_a(%d) > toa_a_l(%d), continue next loop\n",
tob_a, tob_a_l, toa_a, toa_a_l);
continue;
}
}
sum = ((tob_r - toa_r) * (tob_r - toa_r)) +
((tob_r - tob_a) * (tob_r - tob_a)) +
((tob_r - toa_a) * (tob_r - toa_a)) +
((toa_r - tob_a) * (toa_r - tob_a)) +
((toa_r - toa_a) * (toa_r - toa_a)) +
((tob_a - toa_a) * (tob_a - toa_a));
tob_r_cand = tob_r;
if (sum_last !=0 && sum > sum_last) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern(): Find the optimal pattern, by get minSum\n");
break;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_wrole_pattern(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d), sum_last(%d), sum(%d)\n",
tob_r, toa_r, tob_a, toa_a, sum_last, sum);
sum_last = sum;
}
if (0 == tob_r_cand) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_calculate_2_wrole_pattern(): Can't found suitable pattern, goto calc_nego_pattern\n");
goto calc_nego_pattern;
}
tob_r = tob_r_cand;
toa_r = d_r - tob_r;
tob_a = bcns_offset - toa_r;
toa_a = d_a - tob_a;
m_pattern->tob_r = tob_r;
m_pattern->toa_r = toa_r;
m_pattern->tob_a = tob_a;
m_pattern->toa_a = toa_a;
_mcc_fill_slot_info(m_pattern, false, ref_dur->dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false, ano_dur->dur, m_pattern->role_ano);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
calc_nego_pattern:
if (_mcc_calc_2_wrole_nego_pattern(ref_dur, ano_dur, bcns_offset,
m_pattern))
status = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_pattern(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d)\n",
m_pattern->tob_r, m_pattern->toa_r, m_pattern->tob_a,
m_pattern->toa_a);
return status;
}
/*
* Calculate the optimal pattern for client+client MCC
* @ref_dur: Duration of reference ch
* @ano_dur: Duration of another ch
* @offset: The offset between beacon of client1 and beacon of client2
* @m_pattern: mcc pattern.
* | Wifi1 slot | Wifi2 slot | Wifi1 slot | Wifi2 slot |
* <tob_r> Bcn_r <toa_r> <tob_r> Bcn_r <toa_r>
* <tob_a> Bcn_a <toa_a> <tob_a> Bcn_a <toa_a>
* < bcns_offset >
*/
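/*
* The candidate with the smallest sum of pairwise squared differences between
* tob_r, toa_r, tob_a and toa_a is kept, i.e. the split where every beacon
* sits as close as possible to the middle of its slot. Worked example
* (illustrative values, assuming the EARLY_RX_BCN_T/MIN_RX_BCN_T bounds are
* not hit): ref_dur = 40, ano_dur = 60, offset = 50 settles on tob_r = 20,
* toa_r = 20, tob_a = 30, toa_a = 30, both beacons mid-slot.
*/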
void _mcc_calculate_2_clients_pattern(u16 ref_dur, u16 ano_dur, u16 offset,
struct rtw_phl_mcc_pattern *m_pattern)
{
u16 mcc_intvl = ref_dur + ano_dur;
s16 tob_r = 0, toa_r = 0, tob_a = 0, toa_a = 0;
s16 d_r = ref_dur, d_a = ano_dur, bcns_offset = offset;
s16 sum = 0, sum_last = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_pattern(): ref_dur(%d), ano_dur(%d), bcns offset(%d)\n",
ref_dur, ano_dur, offset);
for (tob_r = EARLY_RX_BCN_T; tob_r < mcc_intvl; tob_r++) {
toa_r = d_r - tob_r;
if (toa_r < MIN_RX_BCN_T) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_pattern(): Find the optimal pattern, by toa_r(%d) < MIN_RX_BCN_T(%d)\n",
toa_r, MIN_RX_BCN_T);
break;
}
tob_a = bcns_offset - toa_r;
if (tob_a < EARLY_RX_BCN_T)
continue;
toa_a = d_a - tob_a;
if (toa_a < MIN_RX_BCN_T){
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_pattern(): Find the optimal pattern, by toa_a(%d) < MIN_RX_BCN_T(%d)\n",
toa_a, MIN_RX_BCN_T);
break;
}
sum = ((tob_r - toa_r) * (tob_r - toa_r)) +
((tob_r - tob_a) * (tob_r - tob_a)) +
((tob_r - toa_a) * (tob_r - toa_a)) +
((toa_r - tob_a) * (toa_r - tob_a)) +
((toa_r - toa_a) * (toa_r - toa_a)) +
((tob_a - toa_a) * (tob_a - toa_a));
if (sum_last !=0 && sum > sum_last) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_pattern(): Find the optimal pattern, by get minSum\n");
break;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_pattern(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d), sum_last(%d), sum(%d)\n",
tob_r, toa_r, tob_a, toa_a, sum_last, sum);
sum_last = sum;
}
tob_r = tob_r - 1;
toa_r = d_r - tob_r;
tob_a = bcns_offset - toa_r;
toa_a = d_a - tob_a;
m_pattern->tob_r = (u8)tob_r;
m_pattern->toa_r = (u8)toa_r;
m_pattern->tob_a = (u8)tob_a;
m_pattern->toa_a = (u8)toa_a;
_mcc_fill_slot_info(m_pattern, false, ref_dur, m_pattern->role_ref);
_mcc_fill_slot_info(m_pattern, false, ano_dur, m_pattern->role_ano);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_calculate_2_clients_pattern(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d)\n",
tob_r, toa_r, tob_a, toa_a);
}
/*
* Fill pattern info for 2 wifi slot req + bt slot req
* @minfo: enable mcc info
* @role_ref: reference wifi slot req
* @role_ano: another wifi slot req
*/
void _mcc_fill_2_wrole_bt_pattern(struct phl_mcc_info *minfo,
struct rtw_phl_mcc_role *role_ref, struct rtw_phl_mcc_role *role_ano)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_pattern *m_pattern = &en_info->m_pattern;
struct rtw_phl_mcc_dur_info dur_ref = role_ref->policy.dur_info;
struct rtw_phl_mcc_dur_info dur_ano = role_ano->policy.dur_info;
#if 0
u16 dr_max = (dur_ref.dur_limit.enable) ?
dur_ref.dur_limit.max_dur : en_info->mcc_intvl;
u16 da_max = (dur_ano.dur_limit.enable) ?
dur_ano.dur_limit.max_dur : en_info->mcc_intvl;
if (minfo->bt_info.bt_seg_num == 2) {
/*temporary disable this case, we can't handle it in GC(NOA)+STA*/
minfo->bt_info.add_bt_role = true;
if (((dur_ref.dur + minfo->bt_info.bt_seg[0]) <= dr_max) &&
((dur_ano.dur + minfo->bt_info.bt_seg[1]) <= da_max)) {
dur_ref.dur += minfo->bt_info.bt_seg[0];
dur_ano.dur += minfo->bt_info.bt_seg[1];
} else if ((dur_ref.dur + minfo->bt_info.bt_dur) <= dr_max) {
dur_ref.dur += minfo->bt_info.bt_dur;
} else if ((dur_ano.dur + minfo->bt_info.bt_dur) <= da_max) {
dur_ano.dur += minfo->bt_info.bt_dur;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_2_wrole_bt_pattern(): Fail to combine wifi slot and bt slot to single slot, we use default worsecase pattern \n");
_mcc_set_2_clients_worsecase_default_pattern(m_pattern, dur_ref.dur);
goto exit;
}
if (RTW_PHL_STATUS_SUCCESS !=
_mcc_calc_2_wifi_slot_worsecase_pattern(
&dur_ref, &dur_ano,
m_pattern->bcns_offset,
role_ref->bcn_intvl,
m_pattern)) {
_mcc_set_2_clients_worsecase_default_pattern(m_pattern, dur_ref.dur);
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_2_wrole_bt_pattern(): _mcc_calc_2_wifi_slot_worsecase_pattern fail, we use default worsecase pattern\n");
}
dur_ref = role_ref->policy.dur_info;
dur_ano = role_ano->policy.dur_info;
if (m_pattern->tob_r > (dur_ref.dur - MIN_RX_BCN_T)) {
role_ref->policy.protect_bcn = true;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_2_wrole_bt_pattern(): bcn will in bt dur, we set protect_bcn = true for role_ref\n");
}
if (m_pattern->tob_a > (dur_ano.dur - MIN_RX_BCN_T)) {
role_ano->policy.protect_bcn = true;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_2_wrole_bt_pattern(): bcn will in bt dur, we set protect_bcn = true for role_ano\n");
}
} else
#endif
if (minfo->bt_info.bt_seg_num == 1) {
minfo->bt_info.add_bt_role = true;
if (RTW_PHL_STATUS_SUCCESS !=
_mcc_calculate_2wslot_1btslot_pattern(&dur_ref,
&dur_ano, minfo->bt_info.bt_dur, m_pattern)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_2_wrole_bt_pattern(): _mcc_calculate_2wslot_1btslot_pattern fail, we use default worsecase pattern\n");
_mcc_set_2_clients_worsecase_default_pattern(m_pattern, dur_ref.dur);
}
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_2_wrole_bt_pattern(): error bt_seg_num(%d)\n",
minfo->bt_info.bt_seg_num);
}
#if 0
exit:
#endif
return;
}
bool _mcc_fill_2wrole_pattern_with_limitation(struct phl_mcc_info *minfo,
struct rtw_phl_mcc_role *role_ref, struct rtw_phl_mcc_role *role_ano)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_dur_info *dur_i_r = &role_ref->policy.dur_info;
struct rtw_phl_mcc_dur_info *dur_i_a = &role_ano->policy.dur_info;
struct rtw_phl_mcc_policy_info *policy_i = NULL;
struct rtw_phl_mcc_courtesy *courtesy_i = &en_info->m_pattern.courtesy_i;
bool ret = false;
if (!(dur_i_r->dur_limit.enable || dur_i_a->dur_limit.enable))
goto exit;
if (dur_i_r->dur_limit.enable && dur_i_a->dur_limit.enable) {
/*TODO: implement in a later phase*/
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_2wrole_pattern_with_limitation(): Not support for all enable limitation, tag_r(%d), tag_a(%d)\n",
dur_i_r->dur_limit.tag, dur_i_a->dur_limit.tag);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_2wrole_pattern_with_limitation(): we ignore the limitation of time slot, it will degrade performance\n");
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_fill_2wrole_pattern_with_limitation\n");
if (_mcc_discision_duration_for_2role_bt(minfo)) {
_mcc_fill_2_wrole_bt_pattern(minfo, role_ref, role_ano);
} else if (RTW_PHL_STATUS_SUCCESS != _mcc_calculate_2_wrole_pattern(
dur_i_r, dur_i_a,
en_info->m_pattern.bcns_offset,
&en_info->m_pattern)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_2wrole_pattern_with_limitation(): Calc pattern fail with limitation of time slot, we ignore the limitation of time slot, it will degrade performance\n");
goto exit;
}
/*_mcc_fill_bt_slot(minfo, dur_i_r, dur_i_a);*/
if (true == courtesy_i->c_en) {
policy_i = &courtesy_i->src_role->policy;
policy_i->courtesy_en = true;
policy_i->courtesy_num = courtesy_i->c_num;
policy_i->courtesy_target = (u8)courtesy_i->tgt_role->macid;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_2wrole_pattern_with_limitation(): Enable courtesy function, time slot of macid:0x%x replace by time slot of macid:0x%x, courtesy_num(%d)\n",
courtesy_i->src_role->macid, policy_i->courtesy_target,
policy_i->courtesy_num);
}
ret = true;
exit:
return ret;
}
void _mcc_fill_2_clients_pattern(struct phl_mcc_info *minfo, u8 worsecase,
struct rtw_phl_mcc_role *role_ref, struct rtw_phl_mcc_role *role_ano)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
if (!worsecase) {
_mcc_calculate_2_clients_pattern(role_ref->policy.dur_info.dur,
role_ano->policy.dur_info.dur,
en_info->m_pattern.bcns_offset,
&en_info->m_pattern);
goto exit;
}
if (RTW_PHL_STATUS_SUCCESS != _mcc_calc_2_clients_worsecase_pattern(
role_ref->policy.dur_info.dur,
role_ano->policy.dur_info.dur,
en_info->m_pattern.bcns_offset,
role_ref->bcn_intvl,
&en_info->m_pattern)) {
_mcc_set_2_clients_worsecase_default_pattern(&en_info->m_pattern,
role_ref->policy.dur_info.dur);
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_info_for_2_clients_mode(): _mcc_calc_2_clients_worsecase_pattern fail, we use default worsecase pattern\n");
}
exit:
return;
}
enum rtw_phl_status _mcc_get_2_clients_bcn_offset(struct phl_info_t *phl,
u16 *offset, struct rtw_phl_mcc_role *role_ref,
struct rtw_phl_mcc_role *role_ano)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
u32 tsf_ref_h = 0, tsf_ref_l = 0, tsf_ano_h = 0, tsf_ano_l = 0;
u64 tsf_ref = 0, tsf_ano = 0;
u16 ofst_r = 0, ofst_a = 0;
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mcc_get_2ports_tsf(phl->hal,
role_ref->group, role_ref->macid, role_ano->macid,
&tsf_ref_h, &tsf_ref_l, &tsf_ano_h, &tsf_ano_l)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_get_2_clients_bcn_offset(): Get tsf failed\n");
goto exit;
}
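/*Combine the 32-bit halves returned by HAL into 64-bit TSF values*/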
tsf_ref = tsf_ref_h;
tsf_ref = tsf_ref << 32;
tsf_ref |= tsf_ref_l;
tsf_ano = tsf_ano_h;
tsf_ano = tsf_ano << 32;
tsf_ano |= tsf_ano_l;
/*calculate the value between current TSF and TBTT*/
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_get_2_clients_bcn_offset(): role_ref calc_offset_from_tbtt\n");
phl_calc_offset_from_tbtt(phl, role_ref->wrole, tsf_ref, &ofst_r);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_get_2_clients_bcn_offset(): role_ano calc_offset_from_tbtt\n");
phl_calc_offset_from_tbtt(phl, role_ano->wrole, tsf_ano, &ofst_a);
if (ofst_r < ofst_a)
ofst_r += role_ref->bcn_intvl;
*offset = ofst_r - ofst_a;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_get_2_clients_bcn_offset(): bcn offset(%d)\n",
*offset);
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
/**
* Set default pattern for ap slot + sta slot
* | Wifi1 slot | Wifi2 slot | Wifi1 slot | Wifi2 slot |
* <tob_r> Bcn_r <toa_r> <tob_r> Bcn_r <toa_r>
* <tob_a> Bcn_a <toa_a> <tob_a> Bcn_a <toa_a>
* < bcns_offset >
**/
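/*
* Example (illustrative values): with mcc_intvl = 100 and both durations 50,
* the beacon offset becomes 50, toa_r = tob_r = 25, tob_a = 50 - 25 = 25 and
* toa_a = 25, i.e. each beacon sits in the middle of its role's slot.
*/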
void _mcc_set_ap_client_default_pattern(struct phl_mcc_info *minfo,
u16 *bcns_offset)
{
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_pattern *m_pattern = &en_info->m_pattern;
struct rtw_phl_mcc_role *role_ref = get_ref_role(en_info);
struct rtw_phl_mcc_role *role_ano = (role_ref == &en_info->mcc_role[0])
? &en_info->mcc_role[1] : &en_info->mcc_role[0];
*bcns_offset = (u8)(en_info->mcc_intvl / 2);
m_pattern->toa_r = role_ref->policy.dur_info.dur / 2;
m_pattern->tob_r = role_ref->policy.dur_info.dur - m_pattern->toa_r;
m_pattern->tob_a = (u8)(*bcns_offset - m_pattern->toa_r);
m_pattern->toa_a = role_ano->policy.dur_info.dur - m_pattern->tob_a;
_mcc_fill_slot_info(m_pattern, false, role_ref->policy.dur_info.dur, role_ref);
_mcc_fill_slot_info(m_pattern, false, role_ano->policy.dur_info.dur, role_ano);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_set_ap_client_default_pattern(): tob_r(%d), toa_r(%d), tob_a(%d), toa_a(%d)\n",
m_pattern->tob_r, m_pattern->toa_r, m_pattern->tob_a, m_pattern->toa_a);
}
void _mcc_set_worsecase_dur_for_2_clients_mode(
struct rtw_phl_mcc_dur_info *dur_i_1,
struct rtw_phl_mcc_dur_info *dur_i_2,
u16 *mcc_intvl)
{
/*TODO: should the worsecase durations come from the core layer?*/
if (dur_i_1->dur > dur_i_2->dur) {
*mcc_intvl = WORSECASE_INTVL;
dur_i_1->dur = CLIENTS_WORSECASE_LARGE_DUR;
dur_i_2->dur = (*mcc_intvl - dur_i_1->dur);
} else {
*mcc_intvl = WORSECASE_INTVL;
dur_i_2->dur = CLIENTS_WORSECASE_LARGE_DUR;
dur_i_1->dur = (*mcc_intvl - dur_i_2->dur);
}
}
void _mcc_set_dur_for_2_clients_mode(
struct rtw_phl_mcc_dur_info *dur_i_1,
struct rtw_phl_mcc_dur_info *dur_i_2,
u16 *mcc_intvl)
{
u16 *dur1 = &dur_i_1->dur, *dur2 = &dur_i_2->dur;
if (*dur1 == MCC_DUR_NONSPECIFIC) {
*dur1 = (*mcc_intvl - (*dur2));
} else {
*dur2 = (*mcc_intvl - (*dur1));
}
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_set_dur_for_2_clients_mode(): Original dur1(%d), dur2(%d)\n",
*dur1, *dur2);
if (*dur1 < MIN_CLIENT_DUR) {
*dur1 = MIN_CLIENT_DUR;
*dur2 = (*mcc_intvl - (*dur1));
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_set_dur_for_2_clients_mode(): Core specific unsuitable duration, we adjust dur to dur1(%d) and dur2(%d)\n",
*dur1, *dur2);
} else if (*dur2 < MIN_CLIENT_DUR) {
*dur2 = MIN_CLIENT_DUR;
*dur1 = (*mcc_intvl - (*dur2));
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_set_dur_for_2_clients_mode(): Core specific unsuitable duration, we adjust dur to dur1(%d) and dur2(%d)\n",
*dur1, *dur2);
}
if ((dur_i_1->dur_limit.enable) && (dur_i_1->dur_limit.max_dur != 0) &&
(*dur1 > dur_i_1->dur_limit.max_dur)) {
*dur1 = dur_i_1->dur_limit.max_dur;
*dur2 = (*mcc_intvl - (*dur1));
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_set_dur_for_2_clients_mode(): dur1 > max_dur(%d), we adjust dur to dur1(%d) and dur2(%d)\n",
dur_i_1->dur_limit.max_dur, *dur1, *dur2);
}
if ((dur_i_2->dur_limit.enable) && (dur_i_2->dur_limit.max_dur != 0) &&
(*dur2 > dur_i_2->dur_limit.max_dur)) {
*dur2 = dur_i_2->dur_limit.max_dur;
*dur1 = (*mcc_intvl - (*dur2));
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_set_dur_for_2_clients_mode(): dur2 > max_dur(%d), we adjust dur to dur1(%d) and dur2(%d)\n",
dur_i_2->dur_limit.max_dur, *dur1, *dur2);
}
}
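/*
* Fill enable info for client+client mode: read the current beacon offset,
* flag the worsecase when the offset leaves too little room between beacons,
* settle the two durations, then pick a pattern path (slot-limited, wifi+bt,
* or plain/worsecase 2-clients) and finally compute the start TSF.
*/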
enum rtw_phl_status _mcc_fill_info_for_2_clients_mode(struct phl_info_t *phl,
struct phl_mcc_info *minfo)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *role_ref = get_ref_role(en_info);
struct rtw_phl_mcc_role *role_ano = (role_ref == &en_info->mcc_role[0])
? &en_info->mcc_role[1] : &en_info->mcc_role[0];
u16 bcns_offset = 0;
bool worsecase = false;
en_info->mcc_intvl = role_ref->bcn_intvl;
if (RTW_PHL_STATUS_SUCCESS != _mcc_get_2_clients_bcn_offset(phl,
&bcns_offset, role_ref, role_ano)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_info_for_2_clients_mode(): Get bcn offset fail\n");
goto exit;
}
en_info->m_pattern.role_ref = role_ref;
en_info->m_pattern.role_ano = role_ano;
en_info->m_pattern.bcns_offset = bcns_offset;
if ((bcns_offset < MIN_BCNS_OFFSET) ||
(bcns_offset > (role_ref->bcn_intvl - MIN_BCNS_OFFSET))) {
worsecase = true;
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_fill_info_for_2_clients_mode(): worsecase, bcns_offset(%d) < %d or bcns_offset > %d\n",
bcns_offset, MIN_BCNS_OFFSET,
(role_ref->bcn_intvl - MIN_BCNS_OFFSET));
}
_mcc_set_dur_for_2_clients_mode(&role_ref->policy.dur_info,
&role_ano->policy.dur_info,
&en_info->mcc_intvl);
if (_mcc_fill_2wrole_pattern_with_limitation(minfo, role_ref, role_ano)) {
/*wifi slot, bt slot, and only 1 wifi slot with limitation*/
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_info_for_2_clients_mode(): _mcc_fill_2wrole_pattern_with_limitation\n");
} else if (_mcc_discision_duration_for_2role_bt(minfo)) {
_mcc_fill_2_wrole_bt_pattern(minfo, role_ref, role_ano);
} else {
/*We only adjust dur for all wifi slot in worsecase*/
if (worsecase) {
_mcc_set_worsecase_dur_for_2_clients_mode(
&role_ref->policy.dur_info,
&role_ano->policy.dur_info, &en_info->mcc_intvl);
}
_mcc_fill_2_clients_pattern(minfo, worsecase, role_ref, role_ano);
}
if (RTW_PHL_STATUS_SUCCESS != _mcc_calculate_start_tsf(phl, en_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_info_for_2_clients_mode(): calculate start tsf fail\n");
goto exit;
}
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
void _mcc_set_dur_for_ap_client_mode(u16 *ap_dur, u16 *client_dur, u16 mcc_intvl)
{
if (*ap_dur == MCC_DUR_NONSPECIFIC)
*ap_dur = mcc_intvl - *client_dur;
if (*ap_dur < MIN_AP_DUR) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_set_dur_for_ap_client_mode(): ap_dur(%d) < MIN_AP_DUR(%d), set ap_dur = MIN_AP_DUR\n",
*ap_dur, MIN_AP_DUR);
*ap_dur = MIN_AP_DUR;
} else if (*ap_dur > (mcc_intvl - MIN_CLIENT_DUR)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_mcc_set_dur_for_ap_client_mode(): ap_dur(%d) < MAX_AP_DUR(%d), set ap_dur = MAX_AP_DUR\n",
*ap_dur, (mcc_intvl - MIN_CLIENT_DUR));
*ap_dur = mcc_intvl - MIN_CLIENT_DUR;
}
*client_dur = mcc_intvl - *ap_dur;
}
enum rtw_phl_status _mcc_fill_info_for_ap_client_mode(
struct phl_info_t *phl, struct phl_mcc_info *minfo)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_sync_tsf_info *sync_info = &en_info->sync_tsf_info;
struct rtw_phl_mcc_role *ap_role = NULL;
struct rtw_phl_mcc_role *client_role = NULL;
struct rtw_phl_mcc_role *role_ref = get_ref_role(en_info);
struct rtw_phl_mcc_role *role_ano = (role_ref == &en_info->mcc_role[0])
? &en_info->mcc_role[1] : &en_info->mcc_role[0];
ap_role = _mcc_get_mrole_by_category(en_info, MCC_ROLE_AP_CAT);
client_role = _mcc_get_mrole_by_category(en_info, MCC_ROLE_CLIENT_CAT);
if (ap_role == NULL || client_role == NULL) {
_mcc_dump_en_info(en_info);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_info_for_ap_client_mode(): (ap_role == NULL || client_role == NULL)\n");
goto exit;
}
en_info->m_pattern.role_ref = role_ref;
en_info->m_pattern.role_ano = role_ano;
en_info->mcc_intvl = ap_role->bcn_intvl;
_mcc_set_dur_for_ap_client_mode(&ap_role->policy.dur_info.dur,
&client_role->policy.dur_info.dur,
en_info->mcc_intvl);
if (_mcc_discision_duration_for_2role_bt(minfo)) {
en_info->m_pattern.bcns_offset = AP_CLIENT_OFFSET;
_mcc_fill_2_wrole_bt_pattern(minfo, role_ref, role_ano);
} else {
_mcc_set_ap_client_default_pattern(minfo,
&en_info->m_pattern.bcns_offset);
}
if (RTW_PHL_STATUS_SUCCESS != rtw_phl_tbtt_sync(phl,
client_role->wrole, ap_role->wrole,
en_info->m_pattern.bcns_offset, true,
&sync_info->offset)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_info_for_ap_client_mode(): Sync tsf fail\n");
goto exit;
}
sync_info->source = client_role->macid;
sync_info->target = ap_role->macid;
sync_info->sync_en = true;
if (RTW_PHL_STATUS_SUCCESS != _mcc_calculate_start_tsf(phl, en_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_info_for_ap_client_mode(): calculate start tsf fail\n");
goto exit;
}
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
/*
* | Wifi slot | BT slot |
* <tob_r> Bcn_r <toa_r>
**/
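/*
* The wifi slot takes whatever the bt slot leaves of the interval
* (mcc_intvl - bt_dur) and the AP beacon is centred inside it.
*/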
enum rtw_phl_status _mcc_fill_info_for_ap_bt_mode(
struct phl_info_t *phl, struct phl_mcc_info *minfo)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_mcc_en_info *en_info = &minfo->en_info;
struct rtw_phl_mcc_role *role_ref = get_ref_role(en_info);
struct rtw_phl_mcc_pattern *m_pattern = &en_info->m_pattern;
minfo->bt_info.add_bt_role = true;
en_info->mcc_intvl = role_ref->bcn_intvl;
role_ref->policy.dur_info.dur = (u8)(en_info->mcc_intvl -
minfo->bt_info.bt_dur);
m_pattern->role_ref = role_ref;
m_pattern->toa_r = role_ref->policy.dur_info.dur / 2;
m_pattern->tob_r = role_ref->policy.dur_info.dur - m_pattern->toa_r;
_mcc_fill_slot_info(m_pattern, false, role_ref->policy.dur_info.dur, role_ref);
_mcc_fill_slot_info(m_pattern, true, minfo->bt_info.bt_dur, NULL);
if (RTW_PHL_STATUS_SUCCESS != _mcc_calculate_start_tsf(phl, en_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_fill_info_for_ap_bt_mode(): calculate start tsf fail\n");
goto exit;
}
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
enum rtw_phl_status _mcc_pkt_offload_for_client(struct phl_info_t *phl, u8 macid)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_stainfo_t *phl_sta = NULL;
struct rtw_pkt_ofld_null_info null_info = {0};
void *d = phl_to_drvpriv(phl);
u32 null_token = 0;
phl_sta = rtw_phl_get_stainfo_by_macid(phl, macid);
if (phl_sta == NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_pkt_offload_for_client(): get sta fail, macid(%d)\n",
macid);
goto exit;
}
if (NOT_USED != phl_pkt_ofld_get_id(phl, macid,
PKT_TYPE_NULL_DATA)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_pkt_offload_for_client(): macid(%d), we had already offload NULL Pkt\n",
macid);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
}
_os_mem_cpy(d, &(null_info.a1[0]), &(phl_sta->mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(d,&(null_info.a2[0]), &(phl_sta->wrole->mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(d, &(null_info.a3[0]), &(phl_sta->mac_addr[0]),
MAC_ADDRESS_LENGTH);
if (RTW_PHL_STATUS_SUCCESS != phl_pkt_ofld_request(phl, macid,
PKT_TYPE_NULL_DATA, &null_token,
__func__, &null_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_pkt_offload_for_client(): Pkt offload fail, macid(%d)\n",
macid);
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_pkt_offload_for_client(): offload ok, macid(%d), null_token(%d)\n",
macid, null_token);
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
enum rtw_phl_status _mcc_pkt_offload(struct phl_info_t *phl,
struct rtw_phl_mcc_en_info *info)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_mcc_role *mcc_role = NULL;
u8 midx = 0;
for (midx = 0; midx < MCC_ROLE_NUM; midx++) {
if (!(info->mrole_map & BIT(midx)))
continue;
mcc_role = &info->mcc_role[midx];
if (_mcc_is_client_category(mcc_role->wrole)) {
if (RTW_PHL_STATUS_SUCCESS !=
_mcc_pkt_offload_for_client(phl,
(u8)mcc_role->macid)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_pkt_offload_for_client(): mcc_role index(%d)\n",
midx);
goto exit;
}
}
}
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
enum rtw_phl_status _mcc_update_2_clients_pattern(struct phl_info_t *phl,
struct phl_mcc_info *ori_minfo,
struct phl_mcc_info *new_minfo)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
if (RTW_PHL_STATUS_SUCCESS != _mcc_fill_info_for_2_clients_mode(phl,
new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_2_clients_pattern(): fill info fail for 2clients mode\n");
goto error;
}
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mcc_change_pattern(phl->hal,
&ori_minfo->en_info, &new_minfo->en_info,
&new_minfo->bt_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_2_clients_pattern(): set duration fail\n");
goto error;
}
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
error:
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_2_clients_pattern(): Update fail\n");
exit:
return status;
}
enum rtw_phl_status _mcc_update_ap_client_pattern(struct phl_info_t *phl,
struct phl_mcc_info *ori_minfo, struct phl_mcc_info *new_minfo)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
if (RTW_PHL_STATUS_SUCCESS != _mcc_fill_info_for_ap_client_mode(phl,
new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_ap_client_pattern(): fill info fail for ap client mode\n");
goto error;
}
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mcc_change_pattern(phl->hal,
&ori_minfo->en_info, &new_minfo->en_info,
&new_minfo->bt_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_ap_client_pattern(): set duration fail\n");
goto error;
}
if ((new_minfo->en_info.sync_tsf_info.sync_en) &&
(new_minfo->en_info.sync_tsf_info.offset !=
ori_minfo->en_info.sync_tsf_info.offset)) {
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mcc_sync_enable(phl->hal,
&new_minfo->en_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_ap_client_pattern(): set tsf sync fail\n");
goto error;
}
}
_mcc_up_noa(phl, new_minfo);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
error:
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_ap_client_pattern(): Update fail\n");
exit:
return status;
}
enum rtw_phl_status _mcc_update_ap_bt_pattern(struct phl_info_t *phl,
struct phl_mcc_info *ori_minfo, struct phl_mcc_info *new_minfo)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
if (RTW_PHL_STATUS_SUCCESS != _mcc_fill_info_for_ap_bt_mode(phl,
new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_ap_bt_pattern(): fill info fail for ap_bt\n");
goto error;
}
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mcc_change_pattern(phl->hal,
&ori_minfo->en_info, &new_minfo->en_info,
&new_minfo->bt_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_ap_bt_pattern(): set duration fail\n");
goto error;
}
_mcc_up_noa(phl, new_minfo);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
error:
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_update_ap_bt_pattern(): Update fail\n");
exit:
return status;
}
enum rtw_phl_status _mcc_duration_change(struct phl_info_t *phl,
struct phl_mcc_info *minfo, struct phl_mcc_info *new_minfo)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
if (RTW_PHL_TDMRA_AP_CLIENT_WMODE == minfo->mcc_mode) {
if (RTW_PHL_STATUS_SUCCESS != _mcc_update_ap_client_pattern(
phl, minfo, new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_duration_change(): update ap_client fail\n");
goto exit;
}
} else if (RTW_PHL_TDMRA_2CLIENTS_WMODE == minfo->mcc_mode) {
if (RTW_PHL_STATUS_SUCCESS != _mcc_update_2_clients_pattern(
phl, minfo, new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_duration_change(): update ap_client fail\n");
goto exit;
}
} else if (RTW_PHL_TDMRA_AP_WMODE == minfo->mcc_mode) {
if (RTW_PHL_STATUS_SUCCESS != _mcc_update_ap_bt_pattern(
phl, minfo, new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_duration_change(): update ap_bt fail\n");
goto exit;
}
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_duration_change(): Undefined mcc_mode(%d)\n",
minfo->mcc_mode);
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_duration_change(): Update success\n");
_os_mem_cpy(phl_to_drvpriv(phl), minfo, new_minfo,
sizeof(struct phl_mcc_info));
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
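/*
* Track beacon drift for client+client mode: re-read the beacon offset,
* compare it with the offset used by the current pattern and, when the drift
* exceeds a tolerance derived from how much slack the pattern still has,
* rebuild the pattern and push it to HW via _mcc_update_2_clients_pattern().
*/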
void _mcc_2_clients_tracking(struct phl_info_t *phl,
struct phl_mcc_info *minfo
)
{
struct rtw_phl_mcc_en_info *en_info = NULL;
struct rtw_phl_mcc_pattern *m_pattern = NULL;
struct rtw_phl_mcc_role *role_ref = NULL;
struct rtw_phl_mcc_role *role_ano = NULL;
struct phl_mcc_info new_minfo = {0};
u16 bcns_offset = 0, diff = 0, tol = 0;/*tolerance*/
bool negative_sign = false;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> _mcc_2_clients_tracking\n");
_os_mem_cpy(phl_to_drvpriv(phl), &new_minfo, minfo,
sizeof(struct phl_mcc_info));
en_info = &new_minfo.en_info;
m_pattern = &en_info->m_pattern;
role_ref = get_ref_role(en_info);
role_ano = (role_ref == &en_info->mcc_role[0]) ? &en_info->mcc_role[1] :
&en_info->mcc_role[0];
if (RTW_PHL_STATUS_SUCCESS != _mcc_get_2_clients_bcn_offset(phl,
&bcns_offset, role_ref, role_ano)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_2_clients_tracking(): Get bcn offset fail\n");
goto exit;
}
if (bcns_offset > m_pattern->bcns_offset) {
diff = bcns_offset - m_pattern->bcns_offset;
} else {
diff = m_pattern->bcns_offset - bcns_offset;
negative_sign = true;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_2_clients_tracking(): old bcns_offset(%d), new bcns_offset(%d)\n",
m_pattern->bcns_offset, bcns_offset);
_mcc_dump_pattern(m_pattern);
_mcc_dump_ref_role_info(en_info);
if (en_info->m_pattern.courtesy_i.c_en) {
tol = CLIENTS_TRACKING_COURTESY_TH;
goto decision;
}
if (en_info->mcc_intvl == WORSECASE_INTVL) {
tol = CLIENTS_TRACKING_WORSECASE_TH;
goto decision;
}
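/*
* Pick the drift tolerance from how much room role_ano's beacon still has on
* the side the offset is drifting towards: near the slot edge the
* critical-point threshold applies, otherwise the tolerance grows with the
* remaining tob_a/toa_a margin.
*/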
if (negative_sign) {
if (m_pattern->tob_a <= EARLY_RX_BCN_T) {
tol = CLIENTS_TRACKING_CRITICAL_POINT_TH;
} else if (m_pattern->tob_a >= (2 * EARLY_RX_BCN_T)){
tol = m_pattern->tob_a - ((3 * EARLY_RX_BCN_T) / 2);
} else {
tol = CLIENTS_TRACKING_TH;
}
} else {
if (m_pattern->toa_a <= MIN_RX_BCN_T) {
tol = CLIENTS_TRACKING_CRITICAL_POINT_TH;
} else if (m_pattern->toa_a >= (2 * MIN_RX_BCN_T)){
tol = m_pattern->toa_a - ((3 * MIN_RX_BCN_T) / 2);
} else {
tol = CLIENTS_TRACKING_TH;
}
}
decision:
if (diff < tol)
goto exit;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_2_clients_tracking(): Need to update new 2clients pattern, negative_sign(%d), diff(%d), tolerance(%d), mcc_intvl(%d)\n",
negative_sign, diff, tol, en_info->mcc_intvl);
_mcc_reset_minfo(phl, &new_minfo, MINFO_RESET_BT_INFO |
MINFO_RESET_PATTERN_INFO);
/*fill original bt slot*/
_mcc_fill_bt_dur(phl, &new_minfo);
/*get original wifi time slot*/
_mcc_fill_mcc_role_policy_info(phl, role_ref->wrole, role_ref);
_mcc_fill_mcc_role_policy_info(phl, role_ano->wrole, role_ano);
if (RTW_PHL_STATUS_SUCCESS != _mcc_update_2_clients_pattern(phl,
minfo, &new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_2_clients_tracking(): update 2clients fail\n");
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_2_clients_tracking(): update new pattern ok\n");
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_2_clients_tracking(): old pattern:\n");
_mcc_dump_pattern(&en_info->m_pattern);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "_mcc_2_clients_tracking(): new pattern:\n");
_mcc_dump_pattern(&new_minfo.en_info.m_pattern);
_os_mem_cpy(phl_to_drvpriv(phl), minfo, &new_minfo,
sizeof(struct phl_mcc_info));
exit:
return;
}
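/* Enable Fw MCC for AP/GO + BT coexistence (single channel ctx,
 * RTW_PHL_TDMRA_AP_WMODE only).
 * @cur_role: the AP/GO role in the current ch.
 */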
enum rtw_phl_status rtw_phl_mcc_ap_bt_coex_enable(struct phl_info_t *phl,
struct rtw_wifi_role_t *cur_role)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_en_info *en_info = NULL;
struct hw_band_ctl_t *band_ctrl = get_band_ctrl(phl, cur_role->hw_band);
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_mcc_ap_bt_coex_enable(): cur_role->type(%d)\n",
cur_role->type);
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, cur_role->hw_band);
en_info = &minfo->en_info;
if (MCC_NONE != minfo->state && MCC_STOP != minfo->state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): (MCC_NONE != minfo->state || MCC_STOP != minfo->state(%d)), please check code flow\n",
minfo->state);
_mcc_dump_state(&minfo->state);
goto exit;
}
_mcc_set_state(minfo, MCC_CFG_EN_INFO);
_mcc_reset_minfo(phl, minfo, (MINFO_RESET_EN_INFO | MINFO_RESET_MODE |
MINFO_RESET_ROLE_MAP | MINFO_RESET_COEX_MODE |
MINFO_RESET_BT_INFO));
if (RTW_PHL_STATUS_SUCCESS != _mcc_get_role_map(phl, band_ctrl,
&minfo->role_map)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): Get role map failed\n");
goto _cfg_info_fail;
}
if (RTW_PHL_STATUS_SUCCESS != _mcc_transfer_mode(phl, minfo,
minfo->role_map)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): transfer mcc mode failed\n");
goto _cfg_info_fail;
}
if (RTW_PHL_TDMRA_AP_WMODE != minfo->mcc_mode) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): error wmode\n");
_mcc_dump_mode(&minfo->mcc_mode);
goto _cfg_info_fail;
}
if (RTW_PHL_STATUS_SUCCESS != _mcc_fill_role_info(phl, en_info,
minfo->role_map, cur_role)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): fill role info failed\n");
goto _cfg_info_fail;
}
_mcc_fill_coex_mode(phl, minfo);
_mcc_fill_bt_dur(phl, minfo);
if (RTW_PHL_STATUS_SUCCESS != _mcc_get_mrole_idx_by_wrole(minfo,
cur_role, &en_info->ref_role_idx)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): fill ref_role idx failed\n");
goto _cfg_info_fail;
}
if (RTW_PHL_STATUS_SUCCESS != _mcc_fill_info_for_ap_bt_mode(phl, minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): fill ref_role idx failed\n");
goto _cfg_info_fail;
}
_mcc_set_state(minfo, MCC_TRIGGER_FW_EN);
if (rtw_hal_mcc_enable(phl->hal, en_info, &minfo->bt_info) !=
RTW_HAL_STATUS_SUCCESS) {
_mcc_set_state(minfo, MCC_FW_EN_FAIL);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_ap_bt_coex_enable(): Enable FW mcc Fail\n");
goto exit;
}
_mcc_set_state(minfo, MCC_RUNING);
PHL_TRACE(COMP_PHL_MCC, _PHL_ALWAYS_, "rtw_phl_mcc_ap_bt_coex_enable(): Enable FW mcc ok\n");
_mcc_up_noa(phl, minfo);
_mcc_dump_mcc_info(minfo);
_mcc_up_fw_log_setting(phl, minfo);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
_cfg_info_fail:
_mcc_set_state(minfo, MCC_NONE);
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_ALWAYS_, "<<< rtw_phl_mcc_ap_bt_coex_enable():status(%d)\n",
status);
return status;
}
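/* Disable Fw MCC which was enabled for GO/AP + BT coexistence.
 * @spec_role: fw switches ch to this role's ch when fw stops mcc.
 */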
enum rtw_phl_status rtw_phl_mcc_go_bt_coex_disable(struct phl_info_t *phl,
struct rtw_wifi_role_t *spec_role)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_en_info *en_info = NULL;
struct rtw_phl_mcc_role *m_role = NULL;
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_go_bt_coex_disable(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, spec_role->hw_band);
en_info = &minfo->en_info;
if (MCC_RUNING != minfo->state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_go_bt_coex_disable(): MCC_RUNING != m_info->state, please check code flow\n");
_mcc_dump_state(&minfo->state);
goto exit;
}
if (NULL == (m_role = _mcc_get_mrole_by_wrole(minfo, spec_role))) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_go_bt_coex_disable(): Can't get mrole, wrole id(%d), please check code flow\n",
spec_role->id);
goto exit;
}
_mcc_set_state(minfo, MCC_TRIGGER_FW_DIS);
if (rtw_hal_mcc_disable(phl->hal, m_role->group, m_role->macid)
!= RTW_HAL_STATUS_SUCCESS) {
status = RTW_PHL_STATUS_FAILURE;
_mcc_set_state(minfo, MCC_FW_DIS_FAIL);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_,"rtw_phl_mcc_go_bt_coex_disable(): Disable FW mcc Fail\n");
goto exit;
}
rtw_hal_sync_cur_ch(phl->hal, spec_role->hw_band, spec_role->chandef);
_mcc_set_state(minfo, MCC_STOP);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_go_bt_coex_disable(): Disable FW mcc ok\n");
_mcc_clean_noa(phl, en_info);
status = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_ALWAYS_, "<<< rtw_phl_mcc_go_bt_coex_disable(): status(%d)\n",
status);
return status;
}
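/* Periodic MCC maintenance for the given HW band; currently only runs
 * 2-clients beacon offset tracking while MCC is running.
 */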
void rtw_phl_mcc_watchdog(struct phl_info_t *phl, u8 band_idx)
{
struct phl_mcc_info *minfo = NULL;
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_watchdog(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, band_idx);
if (MCC_RUNING != minfo->state)
goto exit;
if (RTW_PHL_TDMRA_2CLIENTS_WMODE == minfo->mcc_mode)
_mcc_2_clients_tracking(phl, minfo);
exit:
return;
}
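/* Change the slot duration of info->role while MCC is running: the other
 * roles' durations are marked unspecific and the pattern is re-planned.
 */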
enum rtw_phl_status rtw_phl_mcc_duration_change(struct phl_info_t *phl,
struct phl_tdmra_dur_change_info *info)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = get_mcc_info(phl, info->hw_band);
struct phl_mcc_info new_minfo = {0};
struct rtw_phl_mcc_role *spec_mrole = NULL;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_mcc_duration_change\n");
_os_mem_cpy(phl_to_drvpriv(phl), &new_minfo, minfo,
sizeof(struct phl_mcc_info));
_mcc_set_unspecific_dur(&new_minfo);
spec_mrole = _mcc_get_mrole_by_wrole(&new_minfo, info->role);
if (NULL == spec_mrole) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_duration_change(): Can't get mrole by wrole(%d), please check code flow\n",
info->role->id);
goto exit;
}
spec_mrole->policy.dur_info.dur = info->dur;
if (RTW_PHL_STATUS_SUCCESS != _mcc_duration_change(phl, minfo,
&new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_duration_change(): Change fail\n");
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_duration_change(): Change success\n");
status = RTW_PHL_STATUS_SUCCESS;
_mcc_dump_mcc_info(&new_minfo);
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< rtw_phl_mcc_duration_change\n");
return status;
}
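/* Change the BT time slot while MCC is running. The update is skipped when
 * the requested duration equals the current BT slot, or when no role
 * operates on 2.4G (BT duration is don't-care for all-5G cases).
 */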
enum rtw_phl_status rtw_phl_mcc_bt_duration_change(struct phl_info_t *phl,
struct phl_tdmra_dur_change_info *info)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = get_mcc_info(phl, info->hw_band);
struct phl_mcc_info new_minfo = {0};
struct rtw_phl_mcc_en_info *en_info = NULL;
struct rtw_phl_mcc_role *mrole = NULL;
u8 midx = 0;
bool exist_2g = false;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_mcc_bt_duration_change(): dur(%d)\n",
info->dur);
if (info->dur == minfo->bt_info.bt_dur) {
status = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "rtw_phl_mcc_bt_duration_change(): no change of bt slot(%d), skip it\n",
info->dur);
goto exit;
}
_os_mem_cpy(phl_to_drvpriv(phl), &new_minfo, minfo,
sizeof(struct phl_mcc_info));
en_info = &new_minfo.en_info;
_mcc_reset_minfo(phl, &new_minfo, MINFO_RESET_BT_INFO |
MINFO_RESET_PATTERN_INFO);
_mcc_fill_bt_dur(phl, &new_minfo);
/*fill original wifi time slot*/
for (midx = 0; midx < en_info->mrole_num; midx++) {
if (!(en_info->mrole_map & BIT(midx)))
continue;
mrole = &en_info->mcc_role[midx];
_mcc_fill_mcc_role_policy_info(phl, mrole->wrole, mrole);
if (mrole->chandef->band == BAND_ON_24G)
exist_2g = true;
}
if (false == exist_2g) {
status = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "rtw_phl_mcc_bt_duration_change(): All 5G, Don't care BT duration\n");
goto exit;
}
if (RTW_PHL_STATUS_SUCCESS != _mcc_duration_change(phl, minfo,
&new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_bt_duration_change(): Change fail\n");
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_bt_duration_change(): Change success\n");
status = RTW_PHL_STATUS_SUCCESS;
_mcc_dump_mcc_info(&new_minfo);
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< rtw_phl_mcc_bt_duration_change(): status(%d)\n",
status);
return status;
}
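/* Apply a duration limitation (prohibit slot) request to a wifi role and
 * re-plan the running pattern.
 * @wrole: the role the limitation applies to.
 * @lim_req: limitation request (tag / enable / start time / dur / intvl).
 */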
enum rtw_phl_status rtw_phl_mcc_dur_lim_change(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole,
struct phl_mcc_dur_lim_req_info *lim_req)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = NULL;
struct phl_mcc_info new_minfo = {0};
struct rtw_phl_mcc_en_info *en_info = NULL;
struct rtw_phl_mcc_role *spec_mrole = NULL;
struct rtw_phl_mcc_role *mrole = NULL;
u8 midx = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_mcc_dur_lim_change()\n");
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "_mcc_fill_dur_lim_info(): dur_req: tag(%d), enable(%d), start_t_h(0x%08x), start_t_l(0x%08x), dur(%d), intvl(%d)\n",
lim_req->tag, lim_req->enable, lim_req->start_t_h,
lim_req->start_t_l, lim_req->dur, lim_req->intvl);
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_dur_lim_change(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, wrole->hw_band);
if (MCC_RUNING != minfo->state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_dur_lim_change(): MCC_RUNING != minfo->state, , please check code flow\n");
_mcc_dump_state(&minfo->state);
goto exit;
}
_os_mem_cpy(phl_to_drvpriv(phl), &new_minfo, minfo,
sizeof(struct phl_mcc_info));
spec_mrole = _mcc_get_mrole_by_wrole(&new_minfo, wrole);
if (NULL == spec_mrole) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_dur_lim_change(): Can't get mrole by wrole(%d), please check code flow\n",
wrole->id);
goto exit;
}
en_info = &new_minfo.en_info;
/*fill original bt slot*/
_mcc_reset_minfo(phl, &new_minfo, MINFO_RESET_BT_INFO |
MINFO_RESET_PATTERN_INFO);
_mcc_fill_bt_dur(phl, &new_minfo);
/*fill original wifi time slot*/
for (midx = 0; midx < en_info->mrole_num; midx++) {
if (!(en_info->mrole_map & BIT(midx)))
continue;
mrole = &en_info->mcc_role[midx];
_mcc_fill_mcc_role_policy_info(phl, mrole->wrole, mrole);
}
_mcc_fill_dur_lim_info(phl, spec_mrole, lim_req);
if (RTW_PHL_STATUS_SUCCESS != _mcc_duration_change(phl, minfo,
&new_minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_dur_lim_change(): Change fail\n");
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_dur_lim_change(): Change success\n");
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
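/* Refresh the macid bitmap of the mcc role bound to wrole and update it
 * to FW; no-op unless MCC is running on the role's HW band.
 */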
void rtw_phl_mcc_sta_entry_change(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole)
{
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_role *mrole = NULL;
if (!is_mcc_init(phl)) {
goto exit;
}
minfo = get_mcc_info(phl, wrole->hw_band);
if (MCC_RUNING != minfo->state) {
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, ">>> rtw_phl_mcc_sta_entry_change\n");
minfo = get_mcc_info(phl, wrole->hw_band);
if (NULL == (mrole = _mcc_get_mrole_by_wrole(minfo, wrole))) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_sta_entry_change(): Can't get mrole, wrole id(%d), please check code flow\n",
wrole->id);
goto exit;
}
_mcc_fill_macid_bitmap_by_role(phl, mrole);
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mcc_update_macid_bitmap(
phl->hal, mrole->group,
mrole->macid, &mrole->used_macid)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_sta_entry_change(): Update macid map fail\n");
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_sta_entry_change(): Update macid map ok\n");
_mcc_dump_mcc_info(minfo);
exit:
return;
}
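/* Handle PHL_ROLE_MSTS_STA_CONN_START / PHL_ROLE_MSTS_STA_DIS_CONN of an
 * AP/GO role while MCC is running: refresh the mcc role's macid bitmap and
 * update it to FW.
 */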
void phl_mcc_client_link_notify_for_ap(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, enum role_state state)
{
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_role *mrole = NULL;
if (state != PHL_ROLE_MSTS_STA_CONN_START &&
state != PHL_ROLE_MSTS_STA_DIS_CONN)
goto exit;
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "%s(): mcc is not init, please check code\n",
__func__);
goto exit;
}
minfo = get_mcc_info(phl, wrole->hw_band);
if (MCC_RUNING != minfo->state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "%s(): MCC_RUNING != minfo->state\n",
__func__);
_mcc_dump_state(&minfo->state);
goto exit;
}
if (NULL == (mrole = _mcc_get_mrole_by_wrole(minfo, wrole))) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "%s(): Can't get mrole, wrole id(%d), please check code flow\n",
__func__, wrole->id);
goto exit;
}
_mcc_fill_macid_bitmap_by_role(phl, mrole);
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_mcc_update_macid_bitmap(
phl->hal, mrole->group,
mrole->macid, &mrole->used_macid)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "%s(): Update macid map fail\n",
__func__);
goto exit;
}
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "%s(): Update macid map ok\n",
__func__);
_mcc_dump_mcc_info(minfo);
exit:
return;
}
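/* Return true when FW MCC on the given HW band is being enabled, running,
 * being disabled, or stuck in a failed disable.
 */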
bool rtw_phl_mcc_inprogress(struct phl_info_t *phl, u8 band_idx)
{
bool ret = false;
struct phl_mcc_info *minfo = NULL;
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_inprogress(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, band_idx);
if (MCC_TRIGGER_FW_EN == minfo->state || MCC_RUNING == minfo->state ||
MCC_TRIGGER_FW_DIS == minfo->state ||
MCC_FW_DIS_FAIL == minfo->state) {
ret = true;
}
exit:
return ret;
}
/* Enable Fw MCC
* @cur_role: the role in the current ch.
*/
enum rtw_phl_status rtw_phl_mcc_enable(struct phl_info_t *phl,
struct rtw_wifi_role_t *cur_role)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_en_info *en_info = NULL;
struct hw_band_ctl_t *band_ctrl = get_band_ctrl(phl, cur_role->hw_band);
u8 role_map = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_mcc_enable(): cur_role->type(%d)\n",
cur_role->type);
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, cur_role->hw_band);
en_info = &minfo->en_info;
if (MCC_NONE != minfo->state && MCC_STOP != minfo->state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): (MCC_NONE != minfo->state || MCC_STOP != minfo->state(%d)), please check code flow\n",
minfo->state);
_mcc_dump_state(&minfo->state);
goto exit;
}
_mcc_set_state(minfo, MCC_CFG_EN_INFO);
if (RTW_PHL_STATUS_SUCCESS != _mcc_get_role_map(phl, band_ctrl,
&role_map)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): Get role map failed\n");
goto _cfg_info_fail;
}
_mcc_reset_minfo(phl, minfo, (MINFO_RESET_EN_INFO | MINFO_RESET_MODE |
MINFO_RESET_ROLE_MAP | MINFO_RESET_COEX_MODE |
MINFO_RESET_BT_INFO));
minfo->role_map = role_map;
if (RTW_PHL_STATUS_SUCCESS != _mcc_transfer_mode(phl, minfo,
role_map)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): transfer mcc mode failed\n");
goto _cfg_info_fail;
}
if (RTW_PHL_STATUS_SUCCESS != _mcc_fill_role_info(phl, en_info,
role_map, cur_role)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): fill role info failed\n");
goto _cfg_info_fail;
}
_mcc_fill_coex_mode(phl, minfo);
_mcc_fill_bt_dur(phl, minfo);
if (RTW_PHL_STATUS_SUCCESS != _mcc_get_mrole_idx_by_wrole(minfo,
cur_role, &en_info->ref_role_idx)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): fill ref_role idx failed\n");
goto _cfg_info_fail;
}
if (minfo->mcc_mode == RTW_PHL_TDMRA_AP_CLIENT_WMODE) {
if (RTW_PHL_STATUS_SUCCESS !=
_mcc_fill_info_for_ap_client_mode(phl, minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): fill info failed for ap_client mode\n");
goto _cfg_info_fail;
}
} else if (minfo->mcc_mode == RTW_PHL_TDMRA_2CLIENTS_WMODE){
if (RTW_PHL_STATUS_SUCCESS !=
_mcc_fill_info_for_2_clients_mode(phl, minfo)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): fill info failed for 2clients mode\n");
goto _cfg_info_fail;
}
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): Undefined mcc_mode(%d)\n",
minfo->mcc_mode);
goto _cfg_info_fail;
}
_mcc_dump_mcc_info(minfo);
if (RTW_PHL_STATUS_SUCCESS != _mcc_pkt_offload(phl, en_info)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): pkt offload Fail\n");
goto _cfg_info_fail;
}
_mcc_set_state(minfo, MCC_TRIGGER_FW_EN);
if (rtw_hal_mcc_enable(phl->hal, en_info, &minfo->bt_info) !=
RTW_HAL_STATUS_SUCCESS) {
_mcc_set_state(minfo, MCC_FW_EN_FAIL);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_enable(): Enable FW mcc Fail\n");
goto exit;
}
_mcc_set_state(minfo, MCC_RUNING);
PHL_TRACE(COMP_PHL_MCC, _PHL_ALWAYS_, "rtw_phl_mcc_enable(): Enable FW mcc ok\n");
if (minfo->mcc_mode == RTW_PHL_TDMRA_AP_CLIENT_WMODE)
_mcc_up_noa(phl, minfo);
_mcc_dump_mcc_info(minfo);
_mcc_up_fw_log_setting(phl, minfo);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
_cfg_info_fail:
_mcc_set_state(minfo, MCC_NONE);
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "<<< rtw_phl_mcc_enable():status(%d)\n",
status);
return status;
}
/*
 * Stop fw mcc
 * @spec_role: fw switches ch to the specific ch of this role when fw stops mcc
 */
enum rtw_phl_status rtw_phl_mcc_disable(struct phl_info_t *phl,
struct rtw_wifi_role_t *spec_role)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_en_info *en_info = NULL;
struct rtw_phl_mcc_role *m_role = NULL;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_mcc_disable()\n");
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_disable(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, spec_role->hw_band);
en_info = &minfo->en_info;
if (MCC_RUNING != minfo->state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_disable(): MCC_RUNING != m_info->state, please check code flow\n");
_mcc_dump_state(&minfo->state);
goto exit;
}
if (NULL == (m_role = _mcc_get_mrole_by_wrole(minfo, spec_role))) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_disable(): Can't get mrole, wrole id(%d), please check code flow\n",
spec_role->id);
goto exit;
}
_mcc_set_state(minfo, MCC_TRIGGER_FW_DIS);
if (rtw_hal_mcc_disable(phl->hal, m_role->group, m_role->macid)
!= RTW_HAL_STATUS_SUCCESS) {
status = RTW_PHL_STATUS_FAILURE;
_mcc_set_state(minfo, MCC_FW_DIS_FAIL);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_,"rtw_phl_mcc_disable(): Disable FW mcc Fail\n");
goto exit;
}
rtw_hal_sync_cur_ch(phl->hal, spec_role->hw_band, spec_role->chandef);
_mcc_set_state(minfo, MCC_STOP);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_disable(): Disable FW mcc ok\n");
if (minfo->mcc_mode == RTW_PHL_TDMRA_AP_CLIENT_WMODE)
_mcc_clean_noa(phl, en_info);
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
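/* Change a time slot duration while TDMRA is running; dispatches to the BT
 * or wifi duration change handler according to info->bt_role.
 */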
enum rtw_phl_status rtw_phl_tdmra_duration_change(struct phl_info_t *phl,
struct phl_tdmra_dur_change_info *info)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_en_info *en_info = NULL;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_tdmra_duration_change()\n");
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_tdmra_duration_change(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, info->hw_band);
en_info = &minfo->en_info;
if (MCC_RUNING != minfo->state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_tdmra_duration_change(): MCC_RUNING != m_info->state, please check code flow\n");
_mcc_dump_state(&minfo->state);
goto exit;
}
if (true == info->bt_role) {
status = rtw_phl_mcc_bt_duration_change(phl, info);
} else {
status = rtw_phl_mcc_duration_change(phl, info);
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< rtw_phl_tdmra_duration_change(): status(%d)\n",
status);
exit:
return status;
}
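/* Enable TDMRA: use wifi MCC when two channel contexts exist on the band,
 * or AP/GO + BT coex MCC when a single AP/GO channel context exists;
 * other combinations are not supported.
 */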
enum rtw_phl_status rtw_phl_tdmra_enable(struct phl_info_t *phl,
struct rtw_wifi_role_t *cur_role)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct hw_band_ctl_t *band_ctrl = get_band_ctrl(phl, cur_role->hw_band);
struct mr_info *cur_info = &band_ctrl->cur_info;
u8 chanctx_num = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_tdmra_enable\n");
chanctx_num = (u8)phl_mr_get_chanctx_num(phl, band_ctrl);
if (2 == chanctx_num) {
status = rtw_phl_mcc_enable(phl, cur_role);
} else if (1 == chanctx_num) {
if ((1 == cur_info->ap_num || 1 == cur_info->p2p_go_num) &&
(cur_info->ld_sta_num == 0 || cur_info->lg_sta_num == 0)) {
status = rtw_phl_mcc_ap_bt_coex_enable(phl, cur_role);
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_tdmra_enable(): Not support this type\n");
PHL_DUMP_MR_EX(phl);
}
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_tdmra_enable(): Not support for chanctx_num(%d)\n",
chanctx_num);
}
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< rtw_phl_tdmra_enable(): status(%d)\n",
status);
return status;
}
/*
 * Stop tdmra
 * @spec_role: fw switches ch to the specific ch of this role when fw stops tdmra
 */
enum rtw_phl_status rtw_phl_tdmra_disable(struct phl_info_t *phl,
struct rtw_wifi_role_t *spec_role)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_en_info *en_info = NULL;
struct rtw_phl_mcc_role *m_role = NULL;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_tdmra_disable()\n");
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_tdmra_disable(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, spec_role->hw_band);
en_info = &minfo->en_info;
if (MCC_RUNING != minfo->state) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_tdmra_disable(): MCC_RUNING != m_info->state, please check code flow\n");
_mcc_dump_state(&minfo->state);
goto exit;
}
if (NULL == (m_role = _mcc_get_mrole_by_wrole(minfo, spec_role))) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_tdmra_disable(): Can't get mrole, wrole id(%d), please check code flow\n",
spec_role->id);
#ifdef RTW_WKARD_TDMRA_AUTO_GET_STAY_ROLE
m_role = get_ref_role(en_info);
if (m_role == NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "%s(): Can't get mrole from ref_mrole, please check code flow\n",
__func__);
goto exit;
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "%s(): Workaround get mrore from ref_mrole\n",
__func__);
}
#else
goto exit;
#endif /* RTW_WKARD_TDMRA_AUTO_GET_STAY_ROLE */
}
_mcc_set_state(minfo, MCC_TRIGGER_FW_DIS);
if (rtw_hal_mcc_disable(phl->hal, m_role->group, m_role->macid)
!= RTW_HAL_STATUS_SUCCESS) {
status = RTW_PHL_STATUS_FAILURE;
_mcc_set_state(minfo, MCC_FW_DIS_FAIL);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_,"rtw_phl_tdmra_disable(): Disable FW mcc Fail\n");
goto exit;
}
rtw_hal_sync_cur_ch(phl->hal, m_role->wrole->hw_band, *m_role->chandef);
_mcc_set_state(minfo, MCC_STOP);
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_tdmra_disable(): Disable FW mcc ok\n");
if (minfo->mcc_mode == RTW_PHL_TDMRA_AP_CLIENT_WMODE ||
minfo->mcc_mode == RTW_PHL_TDMRA_AP_WMODE)
_mcc_clean_noa(phl, en_info);
status = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "<<< rtw_phl_tdmra_disable(): status(%d)\n",
status);
return status;
}
enum rtw_phl_status rtw_phl_mcc_init_ops(struct phl_info_t *phl, struct rtw_phl_mcc_ops *ops)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_com_mcc_info *com_info = NULL;
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_init_ops(): mcc is not init, please check code\n");
goto exit;
}
com_info = phl_to_com_mcc_info(phl);
com_info->ops.priv = ops->priv;
com_info->ops.mcc_update_noa = ops->mcc_update_noa;
com_info->ops.mcc_get_setting = ops->mcc_get_setting;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "rtw_phl_mcc_init_ops(): init ok\n");
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
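/* Allocate the common MCC info and the per-HW-band mcc_info structures and
 * mark MCC as initialized; on allocation failure everything is rolled back
 * via rtw_phl_mcc_deinit().
 */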
enum rtw_phl_status rtw_phl_mcc_init(struct phl_info_t *phl)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_com_t *phl_com = phl->phl_com;
void *drv = phl_to_drvpriv(phl);
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct hw_band_ctl_t *band_ctrl = NULL;
u8 band_idx = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_mcc_init()\n");
set_mcc_init_state(phl, false);
if (mr_ctl->com_mcc == NULL) {
mr_ctl->com_mcc = _os_mem_alloc(drv, sizeof(struct phl_com_mcc_info));
if (mr_ctl->com_mcc != NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "rtw_phl_mcc_init(): Allocate phl_com_mcc_info\n");
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_init(): Allocate phl_com_mcc_info Fail\n");
goto deinit;
}
}
for (band_idx = 0; band_idx < MAX_BAND_NUM; band_idx++) {
band_ctrl = &mr_ctl->band_ctrl[band_idx];
if (band_ctrl->mcc_info == NULL) {
band_ctrl->mcc_info = _os_mem_alloc(drv,
sizeof(struct phl_mcc_info));
if (band_ctrl->mcc_info != NULL) {
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "rtw_phl_mcc_init(): Allocate mcc_info for HW Band(%d)\n",
band_idx);
} else {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_init(): Allocate mcc_info fail for HW Band(%d)\n",
band_idx);
goto deinit;
}
}
}
set_mcc_init_state(phl, true);
status = RTW_PHL_STATUS_SUCCESS;
goto exit;
deinit:
rtw_phl_mcc_deinit(phl);
exit:
return status;
}
void rtw_phl_mcc_deinit(struct phl_info_t *phl)
{
struct rtw_phl_com_t *phl_com = phl->phl_com;
void *drv = phl_to_drvpriv(phl);
struct phl_com_mcc_info *com_info = phl_to_com_mcc_info(phl);
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct hw_band_ctl_t *band_ctrl = NULL;
u8 band_idx = 0;
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, ">>> rtw_phl_mcc_deinit()\n");
if (com_info != NULL) {
_os_mem_free(drv, com_info, sizeof(struct phl_com_mcc_info));
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "rtw_phl_mcc_deinit(): Free phl_com_mcc_info\n");
}
for (band_idx = 0; band_idx < MAX_BAND_NUM; band_idx++) {
band_ctrl = &mr_ctl->band_ctrl[band_idx];
if (band_ctrl->mcc_info != NULL) {
_os_mem_free(drv, band_ctrl->mcc_info,
sizeof(struct phl_mcc_info));
PHL_TRACE(COMP_PHL_MCC, _PHL_INFO_, "rtw_phl_mcc_deinit(): Free phl_mcc_info, HwBand(%d)\n",
band_idx);
}
}
set_mcc_init_state(phl, false);
}
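/* Query one MCC debug item of the given HW band.
 * @type: item to query, refer to enum rtw_phl_mcc_dbg_type.
 * @info: caller-provided buffer whose type must match the requested item.
 */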
bool rtw_phl_mcc_get_dbg_info(struct phl_info_t *phl, u8 band_idx,
enum rtw_phl_mcc_dbg_type type, void *info)
{
bool get_ok = false;
struct phl_mcc_info *minfo = NULL;
struct rtw_phl_mcc_en_info *en_info = NULL;
if (!is_mcc_init(phl)) {
PHL_TRACE(COMP_PHL_MCC, _PHL_ERR_, "rtw_phl_mcc_get_dbg_info(): mcc is not init, please check code\n");
goto exit;
}
minfo = get_mcc_info(phl, band_idx);
en_info = &minfo->en_info;
if (MCC_DBG_STATE == type) {
*(enum rtw_phl_mcc_state *)info = minfo->state;
} else if (MCC_DBG_OP_MODE == type) {
*(enum rtw_phl_tdmra_wmode *)info = minfo->mcc_mode;
} else if (MCC_DBG_COEX_MODE == type) {
*(enum rtw_phl_mcc_coex_mode *)info = minfo->coex_mode;
} else if (MCC_DBG_BT_INFO == type) {
_os_mem_cpy(phl_to_drvpriv(phl), info, &minfo->bt_info,
sizeof(struct rtw_phl_mcc_bt_info));
} else if (MCC_DBG_EN_INFO == type) {
_os_mem_cpy(phl_to_drvpriv(phl), info, en_info,
sizeof(struct rtw_phl_mcc_en_info));
} else {
goto exit;
}
get_ok = true;
exit:
return get_ok;
}
#endif /* CONFIG_MCC_SUPPORT */
|
2301_81045437/rtl8852be
|
phl/phl_mcc.c
|
C
|
agpl-3.0
| 146,933
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_MCC_H_
#define _PHL_MCC_H_
/* MCC definition for private usage */
#define phl_to_com_mcc_info(_phl) ((struct phl_com_mcc_info *)(phl_to_mr_ctrl(_phl)->com_mcc))
#define get_mcc_info(_phl, _band) ((struct phl_mcc_info *)((get_band_ctrl(_phl, _band)->mcc_info)))
#define set_mcc_init_state(_phl, _state) (((struct mr_ctl_t *)phl_to_mr_ctrl(_phl))->init_mcc = _state)
#define is_mcc_init(_phl) (((struct mr_ctl_t *)phl_to_mr_ctrl(_phl))->init_mcc == true)
#define get_ref_role(_en_info) ((struct rtw_phl_mcc_role *)&(_en_info->mcc_role[_en_info->ref_role_idx]))
#define EARLY_TX_BCN_T 10
#define MIN_TX_BCN_T 10
#define EARLY_RX_BCN_T 5
#define MIN_RX_BCN_T 10
#define MIN_GO_STA_OFFSET_T 15
#define MIN_CLIENT_DUR (EARLY_RX_BCN_T + MIN_RX_BCN_T)
#define MIN_AP_DUR (EARLY_TX_BCN_T + MIN_GO_STA_OFFSET_T - EARLY_RX_BCN_T)
#define MIN_BCNS_OFFSET (EARLY_RX_BCN_T + MIN_RX_BCN_T)
#define MAX_MCC_GROUP_ROLE 2
#define DEFAULT_AP_DUR 60
#define DEFAULT_CLIENT_DUR 40
#define MCC_DUR_NONSPECIFIC 0xff
#define CLIENTS_WORSECASE_REF_TOA 30
#define CLIENTS_WORSECASE_SMALL_DUR 60
#define CLIENTS_WORSECASE_LARGE_DUR 90
#define WORSECASE_INTVL 150
#define MIN_TRIGGER_MCC_TIME 300/*TU*/
#define CLIENTS_TRACKING_TH 3
#define CLIENTS_TRACKING_WORSECASE_TH 3
#define CLIENTS_TRACKING_COURTESY_TH 3
#define CLIENTS_TRACKING_CRITICAL_POINT_TH 2
#define HANDLE_BCN_INTVL 100
#define BT_DUR_SEG_TH 20
#define AP_CLIENT_OFFSET 40
#define REF_ROLE_IDX 0
#define BT_DUR_MAX_2WS 33 /*The max bt slot for 2wifi slot and 1 bt slot*/
enum _mcc_minfo_reset_type {
MINFO_RESET_EN_INFO = BIT(0),
MINFO_RESET_MODE = BIT(1),
MINFO_RESET_ROLE_MAP = BIT(2),
MINFO_RESET_STATE = BIT(3),
MINFO_RESET_COEX_MODE = BIT(4),
MINFO_RESET_BT_INFO = BIT(5),
MINFO_RESET_PATTERN_INFO = BIT(6),
MINFO_RESET_ALL = 0xFF
};
enum _mcc_role_cat {
MCC_ROLE_NONE = 0,
MCC_ROLE_AP_CAT,
MCC_ROLE_CLIENT_CAT
};
struct phl_mcc_fw_log_info {
bool en_fw_mcc_log;
u8 fw_mcc_log_lv;/* fw mcc log level */
bool update; /*if update = true, we need to update setting to fw.*/
};
struct phl_mcc_info {
struct rtw_phl_mcc_en_info en_info;
enum rtw_phl_tdmra_wmode mcc_mode;
u8 role_map; /*the wifi role map in operating mcc */
enum rtw_phl_mcc_state state;
enum rtw_phl_mcc_coex_mode coex_mode;
struct rtw_phl_mcc_bt_info bt_info;
struct phl_mcc_fw_log_info fw_log_i;
};
#endif /*_PHL_MCC_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_mcc.h
|
C
|
agpl-3.0
| 3,022
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_MCC_DEF_H_
#define _PHL_MCC_DEF_H_
/* MCC definition for public usage in phl layer */
#ifdef CONFIG_MCC_SUPPORT
struct phl_com_mcc_info {
struct rtw_phl_mcc_ops ops;
};
struct phl_mcc_dur_lim_req_info {
enum rtw_phl_mcc_dur_lim_tag tag;
bool enable;
u32 start_t_h; /*start time of Prohibit slot*/
u32 start_t_l;
u32 dur; /*Prohibit time slot, unit: us*/
u32 intvl; /*Prohibit interval, unit: us*/
};
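/* Parameters of a tdmra duration change request.
 * @bt_role: true if the request targets the BT time slot.
 * @hw_band: HW band on which tdmra is running.
 * @role: the wifi role whose slot is changed.
 * @dur: requested duration.
 */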
struct phl_tdmra_dur_change_info {
bool bt_role;
u8 hw_band;
struct rtw_wifi_role_t *role;
u16 dur;
};
void rtw_phl_mcc_watchdog(struct phl_info_t *phl, u8 band_idx);
void rtw_phl_mcc_sta_entry_change(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole);
void phl_mcc_client_link_notify_for_ap(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, enum role_state state);
enum rtw_phl_status rtw_phl_mcc_dur_lim_change(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole,
struct phl_mcc_dur_lim_req_info *lim_req);
bool rtw_phl_mcc_inprogress(struct phl_info_t *phl, u8 band_idx);
enum rtw_phl_status rtw_phl_mcc_enable(struct phl_info_t *phl,
struct rtw_wifi_role_t *cur_role);
enum rtw_phl_status rtw_phl_mcc_disable(struct phl_info_t *phl,
struct rtw_wifi_role_t *spec_role);
enum rtw_phl_status rtw_phl_tdmra_duration_change(struct phl_info_t *phl,
struct phl_tdmra_dur_change_info *info);
enum rtw_phl_status rtw_phl_tdmra_enable(struct phl_info_t *phl,
struct rtw_wifi_role_t *cur_role);
enum rtw_phl_status rtw_phl_tdmra_disable(struct phl_info_t *phl,
struct rtw_wifi_role_t *spec_role);
enum rtw_phl_status rtw_phl_mcc_init_ops(struct phl_info_t *phl, struct rtw_phl_mcc_ops *ops);
enum rtw_phl_status rtw_phl_mcc_init(struct phl_info_t *phl);
void rtw_phl_mcc_deinit(struct phl_info_t *phl);
bool rtw_phl_mcc_get_dbg_info(struct phl_info_t *phl, u8 band_idx,
enum rtw_phl_mcc_dbg_type type, void *info);
#else /* CONFIG_MCC_SUPPORT ==0 */
#define rtw_phl_mcc_watchdog(_phl, _band_idx)
#define phl_mcc_client_link_notify_for_ap(_phl, _wrole, _state)
#define rtw_phl_mcc_enable(_phl,_cur_role) RTW_PHL_STATUS_FAILURE
#define rtw_phl_mcc_disable(_phl,_spec_role) RTW_PHL_STATUS_FAILURE
#define rtw_phl_mcc_init_ops(_phl, _ops) RTW_PHL_STATUS_FAILURE
#define rtw_phl_mcc_init(_phl) RTW_PHL_STATUS_FAILURE
#define rtw_phl_mcc_deinit(_phl)
#define rtw_phl_mcc_inprogress(_phl, _band_idx) false
#define rtw_phl_tdmra_duration_change(_phl, _info) RTW_PHL_STATUS_FAILURE
#define rtw_phl_tdmra_enable(_phl, _cur_role) RTW_PHL_STATUS_FAILURE
#define rtw_phl_tdmra_disable(_phl, _spec_role) RTW_PHL_STATUS_FAILURE
#define rtw_phl_mcc_sta_entry_change(_phl, _wrole);
#endif /* CONFIG_MCC_SUPPORT */
#endif /*_PHL_MCC_DEF_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_mcc_def.h
|
C
|
agpl-3.0
| 3,361
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_MR_H_
#define _PHL_MR_H_
static inline int
phl_mr_get_chanctx_num(struct phl_info_t *phl_info, struct hw_band_ctl_t *band_ctrl)
{
void *drv = phl_to_drvpriv(phl_info);
int chanctx_num = 0;
_os_spinlock(drv, &band_ctrl->chan_ctx_queue.lock, _ps, NULL);
chanctx_num = band_ctrl->chan_ctx_queue.cnt;
_os_spinunlock(drv, &band_ctrl->chan_ctx_queue.lock, _ps, NULL);
return chanctx_num;
}
static inline u8
phl_mr_get_role_num(struct phl_info_t *phl_info,
struct hw_band_ctl_t *band_ctrl)
{
void *drv = phl_to_drvpriv(phl_info);
u8 i;
u8 role_num = 0;
_os_spinlock(drv, &band_ctrl->lock, _ps, NULL);
for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
if (band_ctrl->role_map & BIT(i)) {
role_num++;
}
}
_os_spinunlock(drv, &band_ctrl->lock, _ps, NULL);
return role_num;
}
enum rtw_phl_status
phl_mr_sync_chandef(struct phl_info_t *phl_info, struct hw_band_ctl_t *band_ctrl,
struct rtw_chan_ctx *chanctx, struct rtw_chan_def *chandef);
enum rtw_phl_status
phl_mr_chandef_chg(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, struct rtw_chan_def *new_chan,
struct rtw_chan_def *chctx_result);
enum rtw_phl_status
phl_mr_chandef_upt(struct phl_info_t *phl_info,
struct hw_band_ctl_t *band_ctrl, struct rtw_chan_ctx *chanctx);
enum rtw_phl_status
phl_mr_get_chandef(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wifi_role,
bool sync, struct rtw_chan_def *chandef);
enum rtw_phl_status
phl_mr_ctrl_init(struct phl_info_t *phl_info);
enum rtw_phl_status
phl_mr_ctrl_deinit(struct phl_info_t *phl_info);
#ifdef CONFIG_MCC_SUPPORT
enum rtw_phl_status
phl_mr_mcc_query_role_time_slot_lim (struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole,
struct phl_mcc_dur_lim_req_info *limit_req_info);
enum rtw_phl_status
phl_mr_coex_handle(struct phl_info_t *phl, struct rtw_wifi_role_t *cur_wrole,
u16 slot, enum phl_band_idx band_idx, enum mr_coex_trigger trgger);
enum rtw_phl_status
phl_mr_coex_disable(struct phl_info_t *phl,
struct rtw_wifi_role_t *cur_wrole, enum phl_band_idx band_idx,
enum mr_coex_trigger trgger);
#endif
#ifdef CONFIG_PHL_P2PPS
bool phl_mr_noa_dur_lim_change (struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole,
struct rtw_phl_noa_desc *noa_desc);
#endif
enum rtw_phl_status
phl_mr_info_upt(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole);
enum rtw_phl_status
phl_mr_state_upt(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole);
enum rtw_phl_status
rtw_phl_mr_rx_filter(void *phl, struct rtw_wifi_role_t *wrole);
enum rtw_phl_status
phl_mr_tsf_sync(void *phl, struct rtw_wifi_role_t *wrole,
enum role_state state);
void phl_mr_stop_all_beacon(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole,
bool stop);
enum rtw_phl_status
phl_mr_offch_hdl(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole,
bool off_ch,
void *obj_priv,
u8 (*issue_null_data)(void *priv, u8 ridx, bool ps));
enum rtw_phl_status phl_mr_watchdog(struct phl_info_t *phl_info);
u8 rtw_phl_mr_get_opch_list(void *phl, struct rtw_wifi_role_t *wifi_role,
struct rtw_chan_def *chdef_list, u8 list_size);
void
phl_mr_check_ecsa(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole);
void
phl_mr_check_ecsa_cancel(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole);
#ifdef CONFIG_MCC_SUPPORT
u8 phl_mr_query_mcc_inprogress (struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole,
enum rtw_phl_mcc_chk_inprocess_type check_type);
#endif
#ifdef DBG_PHL_MR
enum rtw_phl_status phl_mr_info_dbg(struct phl_info_t *phl_info);
void phl_mr_dump_cur_chandef(const char *caller, const int line, bool show_caller,
struct phl_info_t *phl_info, struct rtw_wifi_role_t *wifi_role);
#define PHL_DUMP_CUR_CHANDEF(_phl_info, _wrole) \
phl_mr_dump_cur_chandef(__FUNCTION__, __LINE__, false, _phl_info, _wrole);
#define PHL_DUMP_CUR_CHANDEF_EX(_phl_info, _wrole) \
phl_mr_dump_cur_chandef(__FUNCTION__, __LINE__, true, _phl_info, _wrole);
void phl_mr_dump_chctx_info(const char *caller, const int line, bool show_caller,
struct phl_info_t *phl_info, struct phl_queue *chan_ctx_queue, struct rtw_chan_ctx *chanctx);
#define PHL_DUMP_CHAN_CTX(_phl_info, _chctx_q, _chctx) \
phl_mr_dump_chctx_info(__FUNCTION__, __LINE__, false, _phl_info, _chctx_q, _chctx);
#define PHL_DUMP_CHAN_CTX_EX(_phl_info, _chctx_q, _chctx) \
phl_mr_dump_chctx_info(__FUNCTION__, __LINE__, true, _phl_info, _chctx_q, _chctx);
void phl_mr_dump_band_info(const char *caller, const int line, bool show_caller,
struct phl_info_t *phl_info, struct hw_band_ctl_t *band_ctrl);
#define PHL_DUMP_BAND_CTL(_phl_info, band_ctl) \
phl_mr_dump_band_info(__FUNCTION__, __LINE__, false, _phl_info, band_ctl)
#define PHL_DUMP_BAND_CTL_EX(_phl_info, band_ctl) \
phl_mr_dump_band_info(__FUNCTION__, __LINE__, true, _phl_info, band_ctl)
void phl_mr_dump_role_info(const char *caller, const int line, bool show_caller,
struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole);
#define PHL_DUMP_ROLE(_phl_info, _wrole) \
phl_mr_dump_role_info(__FUNCTION__, __LINE__, false, _phl_info, _wrole)
#define PHL_DUMP_ROLE_EX(_phl_info, _wrole) \
phl_mr_dump_role_info(__FUNCTION__, __LINE__, true, _phl_info, _wrole)
void phl_mr_dump_info(const char *caller, const int line, bool show_caller,
struct phl_info_t *phl_info);
#define PHL_DUMP_MR(_phl_info) phl_mr_dump_info(__FUNCTION__, __LINE__, false, _phl_info)
#define PHL_DUMP_MR_EX(_phl_info) phl_mr_dump_info(__FUNCTION__, __LINE__, true, _phl_info)
#else
#define PHL_DUMP_CUR_CHANDEF(_phl_info, _wrole)
#define PHL_DUMP_CUR_CHANDEF_EX(_phl_info, _wrole)
#define PHL_DUMP_CHAN_CTX(_phl_info, _chctx_q, _chctx)
#define PHL_DUMP_CHAN_CTX_EX(_phl_info, _chctx_q, _chctx)
#define PHL_DUMP_BAND_CTL(_phl_info, band_ctl)
#define PHL_DUMP_BAND_CTL_EX(_phl_info, band_ctl)
#define PHL_DUMP_ROLE(_phl_info, _wrole)
#define PHL_DUMP_ROLE_EX(_phl_info, _wrole)
#define PHL_DUMP_MR(_phl_info)
#define PHL_DUMP_MR_EX(_phl_info)
#endif
#endif /*_PHL_MR_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_mr.h
|
C
|
agpl-3.0
| 6,700
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* Author: vincent_fann@realtek.com
*
*****************************************************************************/
#include "phl_headers.h"
#define MODL_MASK_LEN (PHL_BK_MDL_END / 8)
#define MAX_MSG_NUM (16)
enum msg_hub_status {
MSG_HUB_INIT = BIT0,
MSG_HUB_STARTED = BIT1,
};
enum msg_recver_status {
MSG_RECVER_INIT = BIT0,
MSG_RECVER_CLR_CTX = BIT1,
};
struct phl_msg_receiver_ex {
u8 status;
u8 bitmap[ MODL_MASK_LEN ];
struct phl_msg_receiver ctx;
};
struct phl_msg_ex {
_os_list list;
struct phl_msg ctx;
struct msg_completion_routine completion;
};
/**
* phl_msg_hub - responsible for phl msg forwarding,
* @status: contain mgnt status flags, refer to enum msg_hub_status
* @msg_pool: msg extension pool
* @msg_notify_thread: thread for forwarding msg
* @recver: msg receiver, refer to enum phl_msg_recver_layer
*/
struct phl_msg_hub {
u32 status;
struct phl_msg_ex msg_pool[MAX_MSG_NUM];
struct phl_queue idle_msg_q;
struct phl_queue wait_msg_q;
_os_sema msg_q_sema;
_os_thread msg_notify_thread;
/* for core & phl layer respectively */
struct phl_msg_receiver_ex recver[MSG_RECV_MAX];
};
inline static u8 _is_bitmap_empty(void* d, u8* bitmap){
u8 empty[MODL_MASK_LEN] = {0};
return (!_os_mem_cmp(d, bitmap, empty, MODL_MASK_LEN))?(true):(false);
}
static u8 pop_front_idle_msg(struct phl_info_t* phl, struct phl_msg_ex** msg)
{
void *d = phl_to_drvpriv(phl);
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
_os_list* new_msg = NULL;
(*msg) = NULL;
if(pq_pop(d, &hub->idle_msg_q, &new_msg, _first, _bh)) {
(*msg) = (struct phl_msg_ex*)new_msg;
_os_mem_set(d, &((*msg)->ctx), 0, sizeof(struct phl_msg));
(*msg)->completion.completion = NULL;
(*msg)->completion.priv = NULL;
return true;
}
else
return false;
}
static void push_back_idle_msg(struct phl_info_t* phl, struct phl_msg_ex* ex)
{
void *d = phl_to_drvpriv(phl);
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
if(ex->completion.completion)
ex->completion.completion(ex->completion.priv, &(ex->ctx));
pq_push(d, &hub->idle_msg_q, &ex->list, _tail, _bh);
}
static u8 pop_front_wait_msg(struct phl_info_t* phl, struct phl_msg_ex** msg)
{
void *d = phl_to_drvpriv(phl);
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
_os_list* new_msg = NULL;
(*msg) = NULL;
if(pq_pop(d, &hub->wait_msg_q, &new_msg, _first, _bh)) {
(*msg) = (struct phl_msg_ex*)new_msg;
return true;
}
else
return false;
}
static void push_back_wait_msg(struct phl_info_t* phl, struct phl_msg_ex* ex)
{
void *d = phl_to_drvpriv(phl);
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
pq_push(d, &hub->wait_msg_q, &ex->list, _tail, _bh);
_os_sema_up(d, &(hub->msg_q_sema));
}
void msg_forward(struct phl_info_t* phl, struct phl_msg_ex* ex)
{
void *d = phl_to_drvpriv(phl);
u8 i = 0;
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
struct phl_msg_receiver_ex* recver = NULL;
u8 module_id = MSG_MDL_ID_FIELD(ex->ctx.msg_id);
if (!TEST_STATUS_FLAG(hub->status, MSG_HUB_STARTED)) {
PHL_INFO("%s, msg hub not working\n",__FUNCTION__);
return;
}
for(i = 0; i < MSG_RECV_MAX; i++) {
recver = &(hub->recver[i]);
if(!TEST_STATUS_FLAG(recver->status, MSG_RECVER_INIT)) {
if(TEST_STATUS_FLAG(recver->status, MSG_RECVER_CLR_CTX))
_os_mem_set(d, recver, 0, sizeof(struct phl_msg_receiver_ex));
continue;
}
if(_chk_bitmap_bit(recver->bitmap, module_id)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s notify %d layer\n",
__FUNCTION__, i);
recver->ctx.incoming_evt_notify(recver->ctx.priv,
&(ex->ctx));
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s notify %d layer\n",
__FUNCTION__, i);
}
}
}
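/* Msg hub notify thread: waits on msg_q_sema, forwards each queued msg to
 * the registered receivers, then returns it (running its completion routine)
 * to the idle pool; before stopping, all in-flight msgs are drained back to
 * the idle queue.
 */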
int msg_thread_hdl(void* param)
{
struct phl_info_t* phl = (struct phl_info_t *)param;
void *d = phl_to_drvpriv(phl);
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
struct phl_msg_ex* ex = NULL;
PHL_INFO("%s enter\n",__FUNCTION__);
while(!_os_thread_check_stop(d, &(hub->msg_notify_thread))) {
_os_sema_down(d, &hub->msg_q_sema);
if(_os_thread_check_stop(d, &(hub->msg_notify_thread)))
break;
while(pop_front_wait_msg(phl, &ex)){
msg_forward(phl, ex);
push_back_idle_msg(phl, ex);
}
}
while (hub->idle_msg_q.cnt != MAX_MSG_NUM) {
while(pop_front_wait_msg(phl, &ex))
push_back_idle_msg(phl, ex);
}
_os_thread_wait_stop(d, &(hub->msg_notify_thread));
PHL_INFO("%s down\n",__FUNCTION__);
return 0;
}
enum rtw_phl_status phl_msg_hub_init(struct phl_info_t* phl)
{
struct phl_msg_hub* hub = NULL;
void *d = phl_to_drvpriv(phl);
if(phl->msg_hub != NULL)
return RTW_PHL_STATUS_FAILURE;
hub = (struct phl_msg_hub *)_os_mem_alloc(d,
sizeof(struct phl_msg_hub));
if(hub == NULL) {
PHL_ERR("%s, alloc fail\n",__FUNCTION__);
return RTW_PHL_STATUS_RESOURCE;
}
phl->msg_hub = hub;
_os_sema_init(d, &(hub->msg_q_sema), 0);
pq_init(d, &(hub->idle_msg_q));
pq_init(d, &(hub->wait_msg_q));
SET_STATUS_FLAG(hub->status, MSG_HUB_INIT);
PHL_INFO("%s\n",__FUNCTION__);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_msg_hub_deinit(struct phl_info_t* phl)
{
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
void *d = phl_to_drvpriv(phl);
if(!TEST_STATUS_FLAG(hub->status, MSG_HUB_INIT))
return RTW_PHL_STATUS_FAILURE;
CLEAR_STATUS_FLAG(hub->status, MSG_HUB_INIT);
phl_msg_hub_stop(phl);
pq_deinit(d, &(hub->idle_msg_q));
pq_deinit(d, &(hub->wait_msg_q));
_os_sema_free(d, &(hub->msg_q_sema));
_os_mem_free(d, hub, sizeof(struct phl_msg_hub));
PHL_INFO("%s\n",__FUNCTION__);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_msg_hub_start(struct phl_info_t* phl)
{
u8 i = 0;
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
void *d = phl_to_drvpriv(phl);
if(!TEST_STATUS_FLAG(hub->status, MSG_HUB_INIT)||
TEST_STATUS_FLAG(hub->status, MSG_HUB_STARTED))
return RTW_PHL_STATUS_FAILURE;
_os_mem_set(d, hub->msg_pool, 0,
sizeof(struct phl_msg_ex) * MAX_MSG_NUM );
pq_reset(d, &(hub->idle_msg_q), _bh);
pq_reset(d, &(hub->wait_msg_q), _bh);
for(i = 0; i < MAX_MSG_NUM; i++) {
pq_push(d, &hub->idle_msg_q, &hub->msg_pool[i].list, _tail, _bh);
}
_os_thread_init(d, &(hub->msg_notify_thread), msg_thread_hdl, phl,
"msg_notify_thread");
_os_thread_schedule(d, &(hub->msg_notify_thread));
SET_STATUS_FLAG(hub->status, MSG_HUB_STARTED);
PHL_INFO("%s\n",__FUNCTION__);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_msg_hub_stop(struct phl_info_t* phl)
{
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
void *d = phl_to_drvpriv(phl);
if(!TEST_STATUS_FLAG(hub->status, MSG_HUB_STARTED))
return RTW_PHL_STATUS_FAILURE;
CLEAR_STATUS_FLAG(hub->status, MSG_HUB_STARTED);
_os_thread_stop(d, &(hub->msg_notify_thread));
_os_sema_up(d, &(hub->msg_q_sema));
_os_thread_deinit(d, &(hub->msg_notify_thread));
pq_reset(d, &(hub->idle_msg_q), _bh);
pq_reset(d, &(hub->wait_msg_q), _bh);
PHL_INFO("%s\n",__FUNCTION__);
return RTW_PHL_STATUS_SUCCESS;
}
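/* Enqueue a msg for forwarding to the registered receivers. The msg is
 * copied into an idle msg extension; an optional completion routine from
 * attr runs after the msg has been forwarded. Returns
 * RTW_PHL_STATUS_RESOURCE when the idle pool is empty.
 */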
enum rtw_phl_status phl_msg_hub_send(struct phl_info_t* phl,
struct phl_msg_attribute* attr, struct phl_msg* msg)
{
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl->msg_hub;
void *d = phl_to_drvpriv(phl);
struct phl_msg_ex* ex = NULL;
if(!TEST_STATUS_FLAG(hub->status, MSG_HUB_STARTED) || msg == NULL)
return RTW_PHL_STATUS_FAILURE;
if(!pop_front_idle_msg(phl, &ex)) {
PHL_ERR(" %s idle msg empty\n",__FUNCTION__);
return RTW_PHL_STATUS_RESOURCE;
}
_os_mem_cpy(d, &(ex->ctx), msg, sizeof(struct phl_msg));
if(attr && attr->completion.completion) {
ex->completion.completion = attr->completion.completion;
ex->completion.priv = attr->completion.priv;
}
push_back_wait_msg(phl, ex);
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s, msg_id:0x%x enqueue\n",
__FUNCTION__, msg->msg_id);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_msg_hub_register_recver(void* phl,
struct phl_msg_receiver* ctx, enum phl_msg_recver_layer layer)
{
struct phl_info_t* phl_info = (struct phl_info_t*)phl;
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl_info->msg_hub;
void *d = phl_to_drvpriv(phl_info);
struct phl_msg_receiver_ex* recver = NULL;
if(!TEST_STATUS_FLAG(hub->status, MSG_HUB_INIT) ||
layer >= MSG_RECV_MAX ||
ctx == NULL)
return RTW_PHL_STATUS_FAILURE;
recver = &(hub->recver[layer]);
if(TEST_STATUS_FLAG(recver->status, MSG_RECVER_INIT)) {
PHL_ERR("%s, layer registered\n",__FUNCTION__);
return RTW_PHL_STATUS_FAILURE;
}
_os_mem_cpy(d, &(recver->ctx), ctx, sizeof(struct phl_msg_receiver));
_os_mem_set(d, &(recver->bitmap), 0, MODL_MASK_LEN);
SET_STATUS_FLAG(recver->status, MSG_RECVER_INIT);
PHL_INFO("%s\n",__FUNCTION__);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_msg_hub_update_recver_mask(void* phl,
enum phl_msg_recver_layer layer, u8* mdl_id, u32 len, u8 clr)
{
struct phl_info_t* phl_info = (struct phl_info_t*)phl;
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl_info->msg_hub;
struct phl_msg_receiver_ex* recver = NULL;
if(!TEST_STATUS_FLAG(hub->status, MSG_HUB_INIT) ||
layer >= MSG_RECV_MAX)
return RTW_PHL_STATUS_FAILURE;
recver = &(hub->recver[layer]);
if(!TEST_STATUS_FLAG(recver->status, MSG_RECVER_INIT)) {
PHL_ERR("%s, layer not registered\n",__FUNCTION__);
return RTW_PHL_STATUS_FAILURE;
}
if(clr == true)
_clr_bitmap_bit(recver->bitmap, mdl_id, len);
else
_add_bitmap_bit(recver->bitmap, mdl_id, len);
PHL_INFO(" %s\n",__FUNCTION__);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_msg_hub_deregister_recver(void* phl,
enum phl_msg_recver_layer layer)
{
struct phl_info_t* phl_info = (struct phl_info_t*)phl;
struct phl_msg_hub* hub = (struct phl_msg_hub*)phl_info->msg_hub;
struct phl_msg_receiver_ex* recver = NULL;
if(!TEST_STATUS_FLAG(hub->status, MSG_HUB_INIT) ||
layer >= MSG_RECV_MAX)
return RTW_PHL_STATUS_FAILURE;
recver = &(hub->recver[layer]);
if(!TEST_STATUS_FLAG(recver->status, MSG_RECVER_INIT)) {
PHL_ERR("%s, layer not registered\n",__FUNCTION__);
return RTW_PHL_STATUS_FAILURE;
}
CLEAR_STATUS_FLAG(recver->status, MSG_RECVER_INIT);
SET_STATUS_FLAG(recver->status, MSG_RECVER_CLR_CTX);
PHL_INFO("%s\n",__FUNCTION__);
return RTW_PHL_STATUS_SUCCESS;
}
/* handling msg hub event with PHL_MDL_PHY_MGNT as module id */
void phl_msg_hub_phy_mgnt_evt_hdlr(struct phl_info_t* phl, u16 evt_id)
{
PHL_INFO("%s : evt_id %d.\n", __func__, evt_id);
switch (evt_id) {
case MSG_EVT_FWDL_OK:
break;
case MSG_EVT_FWDL_FAIL:
break;
case MSG_EVT_DUMP_PLE_BUFFER:
rtw_phl_ser_dump_ple_buffer(phl);
break;
default:
break;
}
}
void phl_msg_hub_rx_evt_hdlr(struct phl_info_t* phl, u16 evt_id,
u8 *buf, u32 len)
{
PHL_DBG("%s : evt_id %d.\n", __func__, evt_id);
switch (evt_id) {
case HAL_C2H_EV_BB_MUGRP_DOWN:
break;
case MSG_EVT_TSF32_TOG:
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_msg_hub_rx_evt_hdlr():toggle happen!!\n");
phl_p2pps_tsf32_tog_handler(phl);
break;
case MSG_EVT_DBG_RX_DUMP:
phl_rx_dbg_dump(phl, HW_PHY_0);
break;
default:
break;
}
}
|
2301_81045437/rtl8852be
|
phl/phl_msg_hub.c
|
C
|
agpl-3.0
| 11,675
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_PHY_MSG_FWD_H_
#define __PHL_PHY_MSG_FWD_H_
enum rtw_phl_status phl_msg_hub_init(struct phl_info_t *phl);
enum rtw_phl_status phl_msg_hub_deinit(struct phl_info_t *phl);
enum rtw_phl_status phl_msg_hub_start(struct phl_info_t *phl);
enum rtw_phl_status phl_msg_hub_stop(struct phl_info_t *phl);
enum rtw_phl_status phl_msg_hub_send(struct phl_info_t *phl,
struct phl_msg_attribute *attr,
struct phl_msg *msg);
enum rtw_phl_status
phl_msg_hub_register_recver(void *phl, struct phl_msg_receiver *ctx,
enum phl_msg_recver_layer layer);
enum rtw_phl_status
phl_msg_hub_update_recver_mask(void *phl, enum phl_msg_recver_layer layer,
u8 *mdl_id, u32 len, u8 clr);
enum rtw_phl_status
phl_msg_hub_deregister_recver(void *phl, enum phl_msg_recver_layer layer);
void phl_msg_hub_phy_mgnt_evt_hdlr(struct phl_info_t *phl, u16 evt_id);
void phl_msg_hub_rx_evt_hdlr(struct phl_info_t *phl, u16 evt_id, u8 *buf,
u32 len);
#endif
|
2301_81045437/rtl8852be
|
phl/phl_msg_hub.h
|
C
|
agpl-3.0
| 1,624
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_NOTIFY_C_
#include "phl_headers.h"
#ifdef CONFIG_CMD_DISP
struct cmd_notify_param {
u8 hw_idx;
void *hal_cmd;
enum phl_msg_evt_id event;
};
static void _phl_notify_done(void *drv_priv, u8 *cmd, u32 cmd_len, enum rtw_phl_status status)
{
if (cmd) {
_os_kmem_free(drv_priv, cmd, cmd_len);
cmd = NULL;
PHL_INFO("%s.....\n", __func__);
}
}
enum rtw_phl_status
phl_notify_cmd_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct cmd_notify_param *cmd_notify = (struct cmd_notify_param *)param;
if (cmd_notify->event == MSG_EVT_NOTIFY_BB ||
cmd_notify->event == MSG_EVT_NOTIFY_RF ||
cmd_notify->event == MSG_EVT_NOTIFY_MAC)
rtw_hal_cmd_notification(phl_info->hal,
cmd_notify->event,
cmd_notify->hal_cmd,
cmd_notify->hw_idx);
else
rtw_hal_notification(phl_info->hal, cmd_notify->event, cmd_notify->hw_idx);
return RTW_PHL_STATUS_SUCCESS;
}
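/* Enqueue a MSG_EVT_NOTIFY_HAL command that forwards the given event (with
 * an optional hal_cmd payload) to the HAL on hw_idx; the allocated parameter
 * is freed in the command-done callback.
 */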
enum rtw_phl_status
rtw_phl_cmd_notify(struct rtw_phl_com_t *phl_com,
enum phl_msg_evt_id event,
void *hal_cmd,
u8 hw_idx)
{
void *drv = phlcom_to_drvpriv(phl_com);
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct cmd_notify_param *param = NULL;
u32 param_len = 0;
param_len = sizeof(struct cmd_notify_param);
param = _os_kmem_alloc(drv, param_len);
if (param == NULL) {
PHL_ERR("%s: alloc param failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto error_param;
}
_os_mem_set(drv, param, 0, param_len);
param->event = event;
param->hw_idx = hw_idx;
param->hal_cmd = hal_cmd;
psts = phl_cmd_enqueue(phl_com->phl_priv,
hw_idx,
MSG_EVT_NOTIFY_HAL,
(u8 *)param,
param_len,
_phl_notify_done,
PHL_CMD_NO_WAIT,
0);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
_os_kmem_free(drv, param, param_len);
psts = RTW_PHL_STATUS_FAILURE;
}
error_param:
return psts;
}
#endif /* CONFIG_CMD_DISP */
void rtw_phl_notification(void *phl,
enum phl_msg_evt_id event,
struct rtw_wifi_role_t *wrole)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
#ifdef CONFIG_CMD_DISP
rtw_phl_cmd_notify(phl_info->phl_com, event, NULL, wrole->hw_band);
#else
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s: not support cmd notify\n",
__func__);
rtw_hal_notification(phl_info->hal, event, wrole->hw_band);
#endif /* CONFIG_CMD_DISP */
}
void rtw_phl_dev_terminate_ntf(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
SET_STATUS_FLAG(phl_info->phl_com->dev_state, RTW_DEV_SURPRISE_REMOVAL);
phl_disp_eng_notify_shall_stop(phl_info);
rtw_hal_notification(phl_info->hal, MSG_EVT_SURPRISE_REMOVE, HW_BAND_MAX);
}
|
2301_81045437/rtl8852be
|
phl/phl_notify.c
|
C
|
agpl-3.0
| 3,633
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_NOTIFY_H_
#define _PHL_NOTIFY_H_
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
rtw_phl_cmd_notify(struct rtw_phl_com_t *phl_com,
enum phl_msg_evt_id event,
void *hal_cmd,
u8 hw_idx);
enum rtw_phl_status
phl_notify_cmd_hdl(struct phl_info_t *phl_info, u8 *param);
#endif /* CONFIG_CMD_DISP */
#endif /*_PHL_NOTIFY_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_notify.h
|
C
|
agpl-3.0
| 1,038
|
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_P2PPS_C_
#include "phl_headers.h"
#ifdef RTW_WKARD_P2PPS_REFINE
#ifdef CONFIG_PHL_P2PPS
enum rtw_phl_status phl_p2pps_init(struct phl_info_t *phl)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct rtw_phl_com_t *phl_com = phl->phl_com;
struct rtw_phl_p2pps_info *info;
info = (struct rtw_phl_p2pps_info *)_os_mem_alloc(phl_to_drvpriv(phl),
sizeof(*info));
if (info == NULL)
return RTW_PHL_STATUS_RESOURCE;
_os_mem_set(phl_to_drvpriv(phl),
info, 0, sizeof(*info));
phl_com->p2pps_info = (void*)info;
info->phl_info = phl;
_os_spinlock_init(phl_to_drvpriv(phl), &info->p2pps_lock);
return status;
}
void phl_p2pps_deinit(struct phl_info_t *phl_info)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_phl_p2pps_info *info ;
info = (struct rtw_phl_p2pps_info *)phl_com->p2pps_info;
if (info) {
_os_spinlock_free(phl_to_drvpriv(phl_info), &info->p2pps_lock);
_os_mem_free(phl_to_drvpriv(phl_info), info, sizeof(*info));
}
phl_com->p2pps_info = NULL;
}
void
_phl_p2pps_dump_single_noa_desc(struct rtw_phl_noa_desc *desc)
{
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():enable = %d\n",
desc->enable);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():start_t_h = 0x%x\n",
desc->start_t_h);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():start_t_l = 0x%x\n",
desc->start_t_l);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():interval = %d\n",
desc->interval);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():duration = %d\n",
desc->duration);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():count = %d\n",
desc->count);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():noa_id = %d\n",
desc->noa_id);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():tag = %d\n",
desc->tag);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_single_noa_desc():w_role = 0x%p\n",
desc->w_role);
}
void
_phl_p2pps_dump_noa_table(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_info *info)
{
void *drvpriv = phlcom_to_drvpriv(psinfo->phl_info->phl_com);
struct rtw_phl_noa_desc *desc = NULL;
u8 i = 0;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():====>\n");
_os_spinlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA] info.en_desc_num = %d, pause = %d\n",
info->en_desc_num, info->paused);
for (i = 0; i < MAX_NOA_DESC; i++) {
desc = &info->noa_desc[i];
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]================DESC[%d]==================\n",
i);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():enable = %d\n",
desc->enable);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():start_t_h = 0x%x\n",
desc->start_t_h);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():start_t_l = 0x%x\n",
desc->start_t_l);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():interval = %d\n",
desc->interval);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():duration = %d\n",
desc->duration);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():count = %d\n",
desc->count);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():noa_id = %d\n",
desc->noa_id);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():tag = %d\n",
desc->tag);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_dump_noa_table():w_role = 0x%p\n",
desc->w_role);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]================DESC[%d]==================\n",
i);
}
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
}
struct rtw_phl_noa_info *
_phl_p2pps_get_noa_info_by_role(struct rtw_phl_p2pps_info *psinfo,
struct rtw_wifi_role_t *wrole)
{
u8 idx = get_role_idx(wrole);
return &psinfo->noa_info[idx];
}
struct rtw_phl_noa_desc *
_phl_p2pps_get_first_noa_desc_with_cnt255(struct phl_info_t *phl,
struct rtw_phl_noa_info *info)
{
u8 i = 0;
struct rtw_phl_noa_desc *tmp_desc;
for (i = 0; i < MAX_NOA_DESC; i++) {
tmp_desc = &info->noa_desc[i];
if(tmp_desc->count == 255 && tmp_desc->enable) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_get_first_noa_desc_with_cnt255():get desc, tag = %d!!\n",
tmp_desc->tag);
return tmp_desc;
}
}
return NULL;
}
#ifdef RTW_WKARD_P2PPS_SINGLE_NOA
u8
_phl_p2pps_query_mcc_inprog_wkard(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *w_role)
{
u8 ret = false;
#ifdef CONFIG_MCC_SUPPORT
//ret = phl_mr_query_mcc_inprogress(phl_info, w_role,
// RTW_PHL_MCC_CHK_INPROGRESS);
#endif
return ret;
}
struct rtw_wifi_role_t *
_phl_get_role_by_band_port(struct phl_info_t* phl_info,
u8 hw_band,
u8 hw_port)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct hw_band_ctl_t *band_ctrl = &(mr_ctl->band_ctrl[hw_band]);
struct rtw_wifi_role_t *wrole = NULL;
u8 ridx = 0;
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
if (!(band_ctrl->role_map & BIT(ridx)))
continue;
wrole = phl_get_wrole_by_ridx(phl_info, ridx);
if (wrole == NULL)
continue;
if (wrole->hw_band == hw_band && wrole->hw_port == hw_port) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_get_role_by_band_port():role_id(%d) hw_band = %d, hw_port = %d\n",
ridx, wrole->hw_band, wrole->hw_port);
return wrole;
}
}
return NULL;
}
void
_phl_p2pps_calc_next_noa_s_time(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *w_role,
struct rtw_phl_tsf32_tog_rpt *rpt,
struct rtw_phl_noa_desc *orig_desc,
struct rtw_phl_noa_desc *new_desc)
{
void *d = phl_to_drvpriv(phl_info);
u64 new_st = 0, old_st = 0;
u64 tog_t = 0, delta_t = 0, intv_cnt = 0;
_os_mem_cpy(d, new_desc, orig_desc, sizeof(*orig_desc));
old_st = (((u64)orig_desc->start_t_h << 32) | orig_desc->start_t_l);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_calc_next_noa_s_time():old_st: 0x%08x %08x\n",
(u32)(old_st >> 32), (u32)old_st);
tog_t = (((u64)rpt->tsf_h << 32) | rpt->tsf_l);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_calc_next_noa_s_time():tog_t = 0x%08x %08x\n",
(u32)(tog_t >> 32), (u32)tog_t);
delta_t = tog_t - old_st;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_calc_next_noa_s_time():delta_t = 0x%08x %08x\n",
(u32)(delta_t >> 32), (u32)delta_t);
intv_cnt = _os_division64(delta_t, new_desc->interval) + 1;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_calc_next_noa_s_time():intv_cnt = 0x%08x %08x\n",
(u32)(intv_cnt >> 32), (u32)intv_cnt);
new_st = old_st + (intv_cnt * new_desc->interval);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_calc_next_noa_s_time():new_st = 0x%08x %08x\n",
(u32)(new_st >> 32), (u32)new_st);
new_desc->start_t_h = new_st >> 32;
new_desc->start_t_l = new_st & 0xFFFFFFFF;
}
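/*
 * Worked example of the calculation above (hypothetical numbers): with
 * old_st = 0x1000, interval = 0x200 and a toggle time tog_t = 0x1500,
 * delta_t = 0x500, intv_cnt = 0x500 / 0x200 + 1 = 3 and the new start
 * time is new_st = 0x1000 + 3 * 0x200 = 0x1600, i.e. the first NoA
 * interval boundary that lies after the TSF32 toggle point.
 */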
void _phl_p2pps_ap_on_tsf32_tog(struct phl_info_t* phl_info,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_tsf32_tog_rpt *rpt)
{
struct rtw_phl_p2pps_info *psinfo = phl_to_p2pps_info(phl_info);
struct rtw_phl_noa_info *info = NULL;
struct rtw_phl_noa_desc *orig_desc = NULL;
struct rtw_phl_noa_desc new_desc = {0};
void *d = phl_to_drvpriv(phl_info);
info = _phl_p2pps_get_noa_info_by_role(psinfo, wrole);
orig_desc = _phl_p2pps_get_first_noa_desc_with_cnt255(phl_info, info);
if (orig_desc) {
_phl_p2pps_calc_next_noa_s_time(phl_info, wrole, rpt,
orig_desc, &new_desc);
_os_mem_cpy(d, orig_desc, &new_desc, sizeof(new_desc));
_phl_p2pps_dump_single_noa_desc(&new_desc);
if(psinfo->ops.tsf32_tog_update_single_noa)
psinfo->ops.tsf32_tog_update_single_noa(d, wrole, &new_desc);
} else {
return;
}
}
#endif
void phl_p2pps_tsf32_tog_handler(struct phl_info_t* phl_info)
{
void *hal = phl_info->hal;
struct rtw_phl_tsf32_tog_rpt rpt = {0};
struct rtw_wifi_role_t *wrole = NULL;
enum rtw_hal_status h_stat;
h_stat = rtw_hal_get_tsf32_tog_rpt(hal, &rpt);
if (h_stat != RTW_HAL_STATUS_SUCCESS)
return;
if (!rpt.valid) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_WARNING_, "[NOA]phl_p2pps_tsf32_tog_handler():report not valid!!\n");
return;
}
wrole = _phl_get_role_by_band_port(phl_info, rpt.band, rpt.port);
if (wrole) {
if (wrole->type == PHL_RTYPE_AP) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_tsf32_tog_handler():role(%d) is AP/GO mode, handle noa update\n",
wrole->id);
#ifdef RTW_WKARD_P2PPS_SINGLE_NOA
_phl_p2pps_ap_on_tsf32_tog(phl_info, wrole, &rpt);
#endif
} else if (wrole->type == PHL_RTYPE_STATION) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_tsf32_tog_handler():role(%d) is STA/GC mode, currently do nothing\n",
wrole->id);
/*Call NoA disable all?*/
}
} else {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_WARNING_, "[NOA]phl_p2pps_tsf32_tog_handler():NULL ROLE!!, hwband = %d, hwport = %d\n",
rpt.band, rpt.port);
}
}
void
_phl_p2pps_copy_noa_desc(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_desc *dest,
struct rtw_phl_noa_desc *src)
{
void *drvpriv = phlcom_to_drvpriv(psinfo->phl_info->phl_com);
_os_spinlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
_os_mem_cpy(drvpriv, dest, src, sizeof(struct rtw_phl_noa_desc));
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
}
void
_phl_p2pps_clear_noa_desc(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_desc *desc)
{
void *drvpriv = phlcom_to_drvpriv(psinfo->phl_info->phl_com);
_os_spinlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
_os_mem_set(drvpriv, desc, 0, sizeof(struct rtw_phl_noa_desc));
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
}
void
_phl_p2pps_noa_increase_desc(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_info *info)
{
void *drvpriv = phlcom_to_drvpriv(psinfo->phl_info->phl_com);
_os_spinlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
info->en_desc_num++;
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
}
void
_phl_p2pps_noa_decrease_desc(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_info *info)
{
void *drvpriv = phlcom_to_drvpriv(psinfo->phl_info->phl_com);
_os_spinlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
if (info->en_desc_num > 0)
info->en_desc_num--;
else
PHL_TRACE(COMP_PHL_P2PPS, _PHL_WARNING_, "[NOA]_phl_p2pps_noa_decrease_desc():info->en_desc_num == 0! Flow error\n");
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
}
u8
_phl_p2pps_noa_should_activate(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_desc *in_desc)
{
u8 ret = true;
if (in_desc->tag == P2PPS_TRIG_GO) {
ret = true;
} else if (in_desc->tag == P2PPS_TRIG_GC) {
ret = true;
} else if (in_desc->tag == P2PPS_TRIG_GC_255) {
ret = true;
} else if (in_desc->tag == P2PPS_TRIG_2G_SCC_1AP_1STA_BT) {
ret = true;
} else if (in_desc->tag == P2PPS_TRIG_MCC) {
ret = false;
#ifdef RTW_WKARD_P2PPS_NOA_MCC
goto exit;
#endif
}
#ifdef RTW_WKARD_P2PPS_SINGLE_NOA
/* Currently MRC should only be notified of the duration-limit request */
/* under the count == 255 case */
if (in_desc->count != 255) {
if (_phl_p2pps_query_mcc_inprog_wkard(psinfo->phl_info,
in_desc->w_role)) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_WARNING_, "[NOA]_phl_p2pps_noa_should_activate():mcc in progress and noa request != 255, currently not handling!\n");
ret = false;
}
} else {
/* open when mr ready*/
/*
if (phl_mr_noa_dur_lim_change(psinfo->phl_info,
in_desc->w_role, in_desc)) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_should_activate():mrc take over this req!\n");
ret = false;
}
*/
}
#endif
exit:
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_should_activate():tag = %d, return = %d\n",
in_desc->tag, ret);
return ret;
}
u8
_phl_p2pps_noa_is_all_disable(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_info *info)
{
u8 i = 0;
void *drvpriv = phlcom_to_drvpriv(psinfo->phl_info->phl_com);
_os_spinlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
for (i = 0; i < MAX_NOA_DESC; i++) {
struct rtw_phl_noa_desc *desc = &info->noa_desc[i];
if(desc->enable) {
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
return false;
}
}
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
return true;
}
u8
_phl_p2pps_noa_assign_noaid(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_info *info,
struct rtw_phl_noa_desc *desc)
{
u8 max = 0, id = NOAID_NONE, i = 0;
void *drvpriv = phlcom_to_drvpriv(psinfo->phl_info->phl_com);
_os_spinlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
if (info->en_desc_num == 0) {
id = 0;/*not inited flow*/
} else {
for (i = 0; i < MAX_NOA_DESC; i++) {
if (info->noa_desc[i].noa_id == NOAID_NONE)
continue;
if (info->noa_desc[i].noa_id > max)
max = info->noa_desc[i].noa_id;
}
if(max != 0)
id = max + 1;
else
id = 0;
}
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_assign_noaid(): Final ID = %d.\n",
id);
return id;
}
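/*
 * Example of the ID selection above (hypothetical table state): with
 * descriptor noa_id values {0, NOAID_NONE, 2, NOAID_NONE} and
 * en_desc_num != 0, max evaluates to 2 and the new descriptor gets id = 3.
 * When no descriptor holds an ID other than 0 (or en_desc_num == 0), the
 * function falls back to id = 0.
 */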
enum rtw_phl_status
_phl_p2pps_noa_disable(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_info *noa_info,
struct rtw_phl_noa_desc *noa_desc,
u8 clear_desc)
{
enum rtw_phl_status ret = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hal_ret = RTW_HAL_STATUS_FAILURE;
void *drvpriv = phlcom_to_drvpriv(psinfo->phl_info->phl_com);
void *hal = psinfo->phl_info->hal;
struct rtw_phl_stainfo_t *sta_info;
struct rtw_wifi_role_t *w_role = NULL;
struct phl_info_t *phl_info = psinfo->phl_info;
u8 en_to_fw = 0;
u8 idx = 0;
if (noa_info->paused && clear_desc) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_disable():NoA info is in pause state, clear desc only!\n");
_phl_p2pps_clear_noa_desc(psinfo,noa_desc);
return RTW_PHL_STATUS_SUCCESS;
}
w_role = noa_desc->w_role;
_os_spinlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
en_to_fw = (noa_desc->noa_id != NOAID_NONE && noa_desc->enable);
_os_spinunlock(drvpriv, &psinfo->p2pps_lock, _bh, NULL);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NoA]%s(): en_to_fw(%d), clear_desc(%d)\n",
__func__, en_to_fw, clear_desc);
if (en_to_fw) {
sta_info = rtw_phl_get_stainfo_self(psinfo->phl_info,
noa_desc->w_role);
hal_ret = rtw_hal_noa_disable(hal, noa_info, noa_desc,
sta_info->macid);
if (hal_ret!= RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_ERR_, "[NOA]_phl_p2pps_noa_disable():NoA Disable fail! tag = %d, ID = %d, HAL return = %d\n",
noa_desc->tag, noa_desc->noa_id, hal_ret);
ret = RTW_PHL_STATUS_FAILURE;
} else {
_phl_p2pps_noa_decrease_desc(psinfo,noa_info);
ret = RTW_PHL_STATUS_SUCCESS;
if (clear_desc)
_phl_p2pps_clear_noa_desc(psinfo,noa_desc);
}
} else {
/*not enabled to fw case*/
ret = RTW_PHL_STATUS_SUCCESS;
if (clear_desc)
_phl_p2pps_clear_noa_desc(psinfo,noa_desc);
}
if(RTW_PHL_STATUS_SUCCESS == ret) {
if(NULL != w_role) {
/* notify BTC */
/* copy noa_desc array to w_role*/
for (idx = 0; idx < MAX_NOA_DESC; idx ++) {
_phl_p2pps_copy_noa_desc(psinfo,
w_role->noa_desc + idx,
noa_info->noa_desc + idx);
}
phl_role_noa_notify(phl_info, w_role);
} else {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_WARNING_, "[NOA]_phl_p2pps_noa_disable():w_role in noa_desc is NULL, not to notify to BTC\n");
}
}
return ret;
}
void _phl_p2pps_noa_disable_all(struct phl_info_t *phl,
struct rtw_wifi_role_t *w_role)
{
struct rtw_phl_p2pps_info *psinfo = phl_to_p2pps_info(phl);
u8 role_id = get_role_idx(w_role);
struct rtw_phl_noa_info *noa_info = &psinfo->noa_info[role_id];
u8 i = 0;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_disable_all():====>\n");
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_disable_all():Disable all NoA for wrole(%d)!\n",
role_id);
_phl_p2pps_dump_noa_table(phl_to_p2pps_info(phl),noa_info);
for (i = 0; i < MAX_NOA_DESC; i++) {
struct rtw_phl_noa_desc *desc = &noa_info->noa_desc[i];
if (desc->enable) {
_phl_p2pps_noa_disable(psinfo, noa_info, desc, true);
}
}
noa_info->paused = false;
_phl_p2pps_dump_noa_table(phl_to_p2pps_info(phl),noa_info);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_disable_all():<====\n");
}
enum rtw_phl_status
_phl_p2pps_noa_enable(struct rtw_phl_p2pps_info *psinfo,
struct rtw_phl_noa_info *noa_info,
struct rtw_phl_noa_desc *noa_desc,
struct rtw_phl_noa_desc *in_desc)
{
enum rtw_phl_status ret = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hal_ret = RTW_HAL_STATUS_FAILURE;
void *hal = psinfo->phl_info->hal;
struct rtw_phl_stainfo_t *sta_info;
struct rtw_wifi_role_t *w_role = NULL;
struct phl_info_t *phl_info = psinfo->phl_info;
u8 idx = 0;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NoA]%s()\n",
__func__);
_phl_p2pps_copy_noa_desc(psinfo, noa_desc, in_desc);
/* get w_role */
w_role = noa_desc->w_role;
if(NULL != w_role) {
/* notify BTC */
/* copy noa_desc array to w_role */
for (idx = 0; idx < MAX_NOA_DESC; idx ++) {
_phl_p2pps_copy_noa_desc(psinfo,
w_role->noa_desc+idx,
noa_info->noa_desc+idx);
}
phl_role_noa_notify(phl_info, w_role);
} else {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_WARNING_, "[NOA]_phl_p2pps_noa_enable():w_role in noa_desc is NULL, not to notify to BTC\n");
}
if (noa_info->paused) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_enable():NoA is in pause state, record request and leave\n");
return RTW_PHL_STATUS_SUCCESS;
}
if (_phl_p2pps_noa_should_activate(psinfo, noa_desc)) {
noa_desc->noa_id = _phl_p2pps_noa_assign_noaid(psinfo, noa_info,
noa_desc);
sta_info = rtw_phl_get_stainfo_self(psinfo->phl_info,
noa_desc->w_role);
hal_ret = rtw_hal_noa_enable(hal, noa_info, noa_desc,
sta_info->macid);
if (hal_ret != RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_ERR_, "[NOA]_phl_p2pps_noa_enable():NoA enable fail! tag = %d, ID = %d, HAL return = %d\n",
noa_desc->tag, noa_desc->noa_id, hal_ret);
noa_desc->noa_id = NOAID_NONE;
if (hal_ret == RTW_HAL_STATUS_RESOURCE)
ret = RTW_PHL_STATUS_RESOURCE;
else
ret = RTW_PHL_STATUS_FAILURE;
} else {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]_phl_p2pps_noa_enable():NoA enable SUCCESS! tag = %d, ID = %d, HAL return = %d\n",
noa_desc->tag, noa_desc->noa_id, hal_ret);
_phl_p2pps_noa_increase_desc(psinfo,noa_info);
ret = RTW_PHL_STATUS_SUCCESS;
}
} else {
noa_desc->noa_id = NOAID_NONE; /*not activate*/
ret = RTW_PHL_STATUS_SUCCESS;
}
return ret;
}
void
phl_p2pps_noa_resume_all(struct phl_info_t *phl,
struct rtw_wifi_role_t *w_role)
{
struct rtw_phl_p2pps_info *psinfo = phl_to_p2pps_info(phl);
u8 role_idx = get_role_idx(w_role);
struct rtw_phl_noa_info *noa_info = &psinfo->noa_info[role_idx];
u8 i = 0;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_resume_all():====>\n");
if (!noa_info->paused) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_resume_all():NoA not paused on role:%d\n",
w_role->id);
goto exit;
}
// _phl_p2pps_dump_noa_table(phl_to_p2pps_info(phl),noa_info);
noa_info->paused = false;
for (i = 0; i < MAX_NOA_DESC; i++) {
struct rtw_phl_noa_desc *desc = &noa_info->noa_desc[i];
if(desc->enable)
_phl_p2pps_noa_enable(psinfo, noa_info, desc, desc);
}
// _phl_p2pps_dump_noa_table(phl_to_p2pps_info(phl),noa_info);
exit:
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_resume_all():<====\n");
}
void
phl_p2pps_noa_all_role_resume(struct phl_info_t *phl_info, u8 band_idx)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct hw_band_ctl_t *band_ctrl = &(mr_ctl->band_ctrl[band_idx]);
struct rtw_wifi_role_t *wrole = NULL;
u8 ridx = 0;
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
if (!(band_ctrl->role_map & BIT(ridx)))
continue;
wrole = phl_get_wrole_by_ridx(phl_info, ridx);
if (wrole == NULL)
continue;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]p2pps_noa_all_role_resume():role_id(%d)\n",
ridx);
phl_p2pps_noa_resume_all(phl_info, wrole);
}
}
void
phl_p2pps_noa_pause_all(struct phl_info_t *phl,
struct rtw_wifi_role_t *w_role)
{
struct rtw_phl_p2pps_info *psinfo = phl_to_p2pps_info(phl);
u8 role_idx = get_role_idx(w_role);
struct rtw_phl_noa_info *noa_info = &psinfo->noa_info[role_idx];
u8 i = 0;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_pause_all():====>\n");
//_phl_p2pps_dump_noa_table(phl_to_p2pps_info(phl),noa_info);
if (noa_info->paused) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_pause_all():This role(%d) NoA is in pause state\n",
role_idx);
goto exit;
}
noa_info->paused = true;
for (i = 0; i < MAX_NOA_DESC; i++) {
struct rtw_phl_noa_desc *desc = &noa_info->noa_desc[i];
_phl_p2pps_noa_disable(psinfo, noa_info, desc, false);
}
//_phl_p2pps_dump_noa_table(phl_to_p2pps_info(phl),noa_info);
exit:
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_pause_all():<====\n");
}
void phl_p2pps_noa_all_role_pause(struct phl_info_t *phl_info, u8 band_idx)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct hw_band_ctl_t *band_ctrl = &(mr_ctl->band_ctrl[band_idx]);
struct rtw_wifi_role_t *wrole = NULL;
u8 ridx = 0;
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
if (!(band_ctrl->role_map & BIT(ridx)))
continue;
wrole = phl_get_wrole_by_ridx(phl_info, ridx);
if (wrole == NULL)
continue;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_all_role_pause():role_id(%d)\n",
ridx);
phl_p2pps_noa_pause_all(phl_info, wrole);
}
}
void phl_p2pps_noa_disable_all(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *w_role)
{
#ifdef RTW_WKARD_P2PPS_SINGLE_NOA
struct rtw_phl_noa_desc dis_desc = {0};
/* to notify MR that the NoA duration limitation is disabled */
dis_desc.enable = false;
dis_desc.w_role = w_role;
/*open when mr ready*/
//phl_mr_noa_dur_lim_change(phl_info, w_role, &dis_desc);
#endif
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_disable_all():====>\n");
_phl_p2pps_noa_disable_all(phl_info, w_role);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_disable_all():Disable TSF 32 TOG for role %d\n",
w_role->id);
rtw_hal_tsf32_tog_disable(phl_info->hal, w_role);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]phl_p2pps_noa_disable_all():<====\n");
}
void phl_p2pps_query_noa_with_cnt255(struct phl_info_t* phl_info,
struct rtw_wifi_role_t *w_role, struct rtw_phl_noa_desc *desc)
{
struct rtw_phl_p2pps_info *psinfo = phl_to_p2pps_info(phl_info);
u8 role_idx = get_role_idx(w_role);
struct rtw_phl_noa_info *info = &psinfo->noa_info[role_idx];
struct rtw_phl_noa_desc *tmp_desc = NULL;
tmp_desc = _phl_p2pps_get_first_noa_desc_with_cnt255(phl_info, info);
if (tmp_desc) {
_phl_p2pps_copy_noa_desc(psinfo, desc, tmp_desc);
} else {
desc->enable = false;
desc->w_role = w_role;
}
}
enum rtw_phl_status
rtw_phl_p2pps_noa_update(void *phl,
struct rtw_phl_noa_desc *in_desc)
{
enum rtw_phl_status ret= RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_p2pps_info *psinfo = phl_to_p2pps_info(phl_info);
struct rtw_wifi_role_t *w_role = in_desc->w_role;
u8 role_id = get_role_idx(w_role);
struct rtw_phl_noa_info *noa_info = &psinfo->noa_info[role_id];
u8 desc_idx = in_desc->tag;
struct rtw_phl_noa_desc *noa_desc = &noa_info->noa_desc[desc_idx];
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_update():DUMP BEFORE!\n");
_phl_p2pps_dump_noa_table(psinfo, noa_info);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_update():cur FW en desc num = %d\n",
noa_info->en_desc_num);
if (in_desc->enable) {
if (_phl_p2pps_noa_is_all_disable(psinfo, noa_info)) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_update():roleid(%d) Enable TSF 32 Toggle!\n",
role_id);
rtw_hal_tsf32_tog_enable(phl_info->hal, in_desc->w_role);
/*todo set TSF_ BIT TOG H2C ON*/
}
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_update():Tag = %d, NoA enable request!\n",
in_desc->tag);
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_update():Tag = %d, NoA disable original req first!\n",
in_desc->tag);
_phl_p2pps_noa_disable(psinfo, noa_info, noa_desc, true);
ret = _phl_p2pps_noa_enable(psinfo, noa_info, noa_desc,
in_desc);
} else {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_update():Tag = %d, NoA disable request!\n",
in_desc->tag);
ret = _phl_p2pps_noa_disable(psinfo, noa_info, noa_desc, true);
if (_phl_p2pps_noa_is_all_disable(psinfo, noa_info)) {
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_update():roleid(%d) Disable TSF 32 Toggle!\n",
role_id);
rtw_hal_tsf32_tog_disable(phl_info->hal, in_desc->w_role);
/*todo set TSF_ BIT TOG H2C OFF*/
}
}
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_update():DUMP AFTER!\n");
_phl_p2pps_dump_noa_table(psinfo, noa_info);
return ret;
}
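/*
 * Illustrative caller sketch (not part of the original driver source): a
 * core layer that wants to program a periodic GO NoA could fill a
 * descriptor and hand it to rtw_phl_p2pps_noa_update as below. All values
 * are hypothetical; phl and wrole are assumed caller context, the 64-bit
 * TSF start time is split into start_t_h/start_t_l, and count = 255 marks
 * a continuous NoA (interval/duration units assumed to be microseconds).
 *
 *	struct rtw_phl_noa_desc desc = {0};
 *
 *	desc.enable = true;
 *	desc.w_role = wrole;
 *	desc.tag = P2PPS_TRIG_GO;
 *	desc.start_t_h = (u32)(start_tsf >> 32);
 *	desc.start_t_l = (u32)(start_tsf & 0xFFFFFFFF);
 *	desc.interval = 102400;
 *	desc.duration = 25600;
 *	desc.count = 255;
 *	if (rtw_phl_p2pps_noa_update(phl, &desc) != RTW_PHL_STATUS_SUCCESS)
 *		PHL_WARN("NoA update failed\n");
 */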
void rtw_phl_p2pps_noa_disable_all(void *phl,
struct rtw_wifi_role_t *w_role)
{
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "[NOA]rtw_phl_p2pps_noa_disable_all()!\n");
phl_p2pps_noa_disable_all((struct phl_info_t *)phl, w_role);
}
void rtw_phl_p2pps_init_ops(void *phl,
struct rtw_phl_p2pps_ops *ops)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_p2pps_info *psinfo = NULL;
psinfo = phl_to_p2pps_info(phl_info);
psinfo->ops.priv = ops->priv;
psinfo->ops.tsf32_tog_update_noa = ops->tsf32_tog_update_noa;
psinfo->ops.tsf32_tog_update_single_noa = ops->tsf32_tog_update_single_noa;
PHL_TRACE(COMP_PHL_P2PPS, _PHL_INFO_, "rtw_phl_p2pps_init_ops(): init ok\n");
}
#endif
#endif
|
2301_81045437/rtl8852be
|
phl/phl_p2pps.c
|
C
|
agpl-3.0
| 26,588
|
/******************************************************************************
*
* Copyright(c)2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_P2PPS_H_
#define _PHL_P2PPS_H_
#ifdef CONFIG_PHL_P2PPS
#ifdef RTW_WKARD_P2PPS_REFINE
enum rtw_phl_status
phl_p2pps_init(struct phl_info_t* phl_info);
void
phl_p2pps_deinit(struct phl_info_t* phl_info);
void
phl_p2pps_query_noa_with_cnt255(struct phl_info_t* phl_info,
struct rtw_wifi_role_t *w_role,
struct rtw_phl_noa_desc *desc);
void
phl_p2pps_tsf32_tog_handler(struct phl_info_t* phl_info);
void
phl_p2pps_noa_resume_all(struct phl_info_t *phl,
struct rtw_wifi_role_t *w_role);
void
phl_p2pps_noa_all_role_resume(struct phl_info_t *phl_info,
u8 band_idx);
void
phl_p2pps_noa_pause_all(struct phl_info_t *phl,
struct rtw_wifi_role_t *w_role);
void
phl_p2pps_noa_all_role_pause(struct phl_info_t *phl_info,
u8 band_idx);
void
phl_p2pps_noa_disable_all(struct phl_info_t *phl,
struct rtw_wifi_role_t *w_role);
enum rtw_phl_status
rtw_phl_p2pps_noa_update(void *phl,
struct rtw_phl_noa_desc *in_desc);
#endif
#else
#define phl_p2pps_init(_phl_info) RTW_PHL_STATUS_SUCCESS
#define phl_p2pps_deinit(_phl_info)
#define rtw_phl_p2pps_noa_update(_phl, _in_desc) RTW_PHL_STATUS_SUCCESS
#define phl_p2pps_tsf32_tog_handler(_phl_info)
#define phl_p2pps_query_noa_with_cnt255(_phl, _wrole, _desc)
#define phl_p2pps_noa_resume_all(_phl, _w_role)
#define phl_p2pps_noa_all_role_resume(_phl, _band_idx)
#define phl_p2pps_noa_pause_all(_phl, _w_role)
#define phl_p2pps_noa_all_role_pause(_phl, _band_idx)
#define phl_p2pps_noa_disable_all(_phl, _w_role)
#endif
#endif
|
2301_81045437/rtl8852be
|
phl/phl_p2pps.h
|
C
|
agpl-3.0
| 2,132
|
/******************************************************************************
*
* Copyright(c)2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_P2PPS_DEF_H_
#define _PHL_P2PPS_DEF_H_
#ifdef RTW_WKARD_P2PPS_REFINE
#ifdef CONFIG_PHL_P2PPS
struct rtw_phl_noa_info {
u8 en_desc_num;
u8 paused;
struct rtw_phl_noa_desc noa_desc[MAX_NOA_DESC];
struct rtw_phl_opps_desc opps_desc;
};
struct rtw_phl_p2pps_info {
struct phl_info_t *phl_info;
struct rtw_phl_p2pps_ops ops;
_os_lock p2pps_lock;
struct rtw_phl_noa_info noa_info[MAX_WIFI_ROLE_NUMBER];
};
#endif
#endif
#endif
|
2301_81045437/rtl8852be
|
phl/phl_p2pps_def.h
|
C
|
agpl-3.0
| 1,092
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
static const char *
_phl_pkt_ofld_get_txt(u8 type)
{
switch (type)
{
case PKT_TYPE_PROBE_RSP:
return "PROBE RSP";
case PKT_TYPE_PS_POLL:
return "PS POLL";
case PKT_TYPE_NULL_DATA:
return "NULL DATA";
case PKT_TYPE_QOS_NULL:
return "QOS NULL";
case PKT_TYPE_CTS2SELF:
return "CTS2SELF";
case PKT_TYPE_ARP_RSP:
return "ARP Response";
case PKT_TYPE_NDP:
return "Neighbor Discovery Protocol";
case PKT_TYPE_EAPOL_KEY:
return "EAPOL-KEY";
case PKT_TYPE_SA_QUERY:
return "SA QUERY";
default:
return "Unknown?!";
}
}
static u8
_phl_pkt_ofld_is_pkt_ofld(struct pkt_ofld_info *pkt_info)
{
if(pkt_info->id != NOT_USED)
return true;
else
return false;
}
static void
_phl_pkt_ofld_dbg_dump_pkt_info(struct pkt_ofld_obj *ofld_obj,
struct pkt_ofld_info *pkt_info)
{
struct pkt_ofld_req *pos = NULL;
phl_list_for_loop(pos, struct pkt_ofld_req, &pkt_info->req_q, list) {
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] ## token %d, req name unknown\n",
pos->token);
}
if(_phl_pkt_ofld_is_pkt_ofld(pkt_info)) {
HAL_PKT_OFLD_READ(ofld_obj, &pkt_info->id);
}
}
static void
_phl_pkt_ofld_dbg_dump(struct pkt_ofld_obj *ofld_obj)
{
u8 idx;
struct pkt_ofld_entry *pos = NULL;
phl_list_for_loop(pos, struct pkt_ofld_entry, &ofld_obj->entry_q, list) {
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] => mac id = %d\n", pos->macid);
for(idx = 0; idx < PKT_OFLD_TYPE_MAX; idx++) {
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] type %-10s:id = %d, req cnt = %d.\n",
_phl_pkt_ofld_get_txt(idx),
pos->pkt_info[idx].id,
pos->pkt_info[idx].req_cnt);
_phl_pkt_ofld_dbg_dump_pkt_info(ofld_obj,
&pos->pkt_info[idx]);
}
}
}
static struct pkt_ofld_req *
_phl_pkt_ofld_gen_req(struct pkt_ofld_obj *ofld_obj, const char *req_name)
{
struct pkt_ofld_req *req;
void *d = phl_to_drvpriv(ofld_obj->phl_info);
req = _os_mem_alloc(d, sizeof(struct pkt_ofld_req));
if (req == NULL) {
PHL_ERR("[PKT] %s: alloc memory req failed.\n", __func__);
return NULL;
}
INIT_LIST_HEAD(&req->list);
req->token = ofld_obj->cur_seq++;
req->req_name_len = _os_strlen((u8 *)req_name) +1;
req->req_name = _os_mem_alloc(d, sizeof(u8)*req->req_name_len);
if (req->req_name == NULL) {
PHL_ERR("[PKT] %s: alloc memory req name failed.\n", __func__);
_os_mem_free(d, req, sizeof(struct pkt_ofld_req));
return NULL;
}
_os_mem_set(d, req->req_name, 0, req->req_name_len);
_os_mem_cpy(d, req->req_name, (char *)req_name, req->req_name_len);
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] New request %p, token = %d, name = unknown.\n",
req, req->token);
return req;
}
static void
_phl_pkt_ofld_del_req(struct pkt_ofld_obj *ofld_obj, struct pkt_ofld_info *pkt_info,
struct pkt_ofld_req *req)
{
void *d = phl_to_drvpriv(ofld_obj->phl_info);
list_del(&req->list);
pkt_info->req_cnt--;
_os_mem_free(d, req->req_name, sizeof(u8)*req->req_name_len);
_os_mem_free(d, req, sizeof(*req));
}
static struct pkt_ofld_req *
_phl_pkt_ofld_get_req(struct pkt_ofld_obj *ofld_obj, struct pkt_ofld_info *pkt_info,
u32 token)
{
struct pkt_ofld_req *pos = NULL;
u8 find = false;
phl_list_for_loop(pos, struct pkt_ofld_req,
&pkt_info->req_q, list) {
if (pos->token == token) {
find = true;
break;
}
}
if (find) {
return pos;
} else {
return NULL;
}
}
static void
_phl_pkt_ofld_add_req(struct pkt_ofld_obj *ofld_obj, struct pkt_ofld_info *pkt_info,
struct pkt_ofld_req *req)
{
list_add(&req->list, &pkt_info->req_q);
pkt_info->req_cnt++;
}
static enum rtw_phl_status
_phl_pkt_ofld_construct_null_data(struct pkt_ofld_obj *ofld_obj, u8 **pkt_buf,
u16 *len, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_null_info *null_info)
{
void *d = phl_to_drvpriv(ofld_obj->phl_info);
struct rtw_wifi_role_t *wrole = phl_sta->wrole;
*len = NULL_PACKET_LEN;
*pkt_buf = _os_mem_alloc(d, NULL_PACKET_LEN);
if (*pkt_buf == NULL) {
return RTW_PHL_STATUS_RESOURCE;
}
_os_mem_set(d, *pkt_buf, 0, NULL_PACKET_LEN);
SET_80211_PKT_HDR_FRAME_CONTROL(*pkt_buf, 0);
SET_80211_PKT_HDR_TYPE_AND_SUBTYPE(*pkt_buf, TYPE_NULL_FRAME);
SET_80211_PKT_HDR_PWR_MGNT(*pkt_buf, 0);
switch(wrole->type)
{
case PHL_RTYPE_STATION:
SET_80211_PKT_HDR_TO_DS(*pkt_buf, 1);
SET_80211_PKT_HDR_ADDRESS1(d, *pkt_buf, null_info->a1);
SET_80211_PKT_HDR_ADDRESS2(d, *pkt_buf, null_info->a2);
SET_80211_PKT_HDR_ADDRESS3(d, *pkt_buf, null_info->a3);
break;
case PHL_RTYPE_AP:
case PHL_RTYPE_ADHOC:
default:
break;
}
SET_80211_PKT_HDR_DURATION(*pkt_buf, 0);
SET_80211_PKT_HDR_FRAGMENT_SEQUENCE(*pkt_buf, 0);
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_pkt_ofld_construct_na(struct pkt_ofld_obj *pkt, u8 **pkt_buf,
u16 *len, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_na_info *na_info)
{
void *d = phl_to_drvpriv(pkt->phl_info);
u8* p_na_body;
u8 NSLLCHeader[8] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x86, 0xDD};
u8 IPv6HeadInfo[4] = {0x60, 0x00, 0x00, 0x00};
u8 IPv6HeadContx[4] = {0x00, 0x20, 0x3a, 0xff};
u8 ICMPv6Head[8] = {0x88, 0x00, 0x00, 0x00 , 0x60 , 0x00 , 0x00 , 0x00};
u8 sec_hdr = na_info->sec_hdr;
/* size estimation */
/* sMacHdrLng + LLC header(8) + NA element(72) */
*len = MAC_HDR_LEN + sec_hdr + 8 + 72;
*pkt_buf = _os_mem_alloc(d, *len);
if (*pkt_buf == NULL)
return RTW_PHL_STATUS_RESOURCE;
_os_mem_set(d, *pkt_buf, 0, *len);
SET_80211_PKT_HDR_FRAME_CONTROL(*pkt_buf, 0);
SET_80211_PKT_HDR_TYPE_AND_SUBTYPE(*pkt_buf, TYPE_DATA_FRAME);
SET_80211_PKT_HDR_TO_DS(*pkt_buf, 1);
if (sec_hdr != 0)
SET_80211_PKT_HDR_PROTECT(*pkt_buf, 1);
SET_80211_PKT_HDR_ADDRESS1(d, *pkt_buf, na_info->a1);
SET_80211_PKT_HDR_ADDRESS2(d, *pkt_buf, na_info->a2);
SET_80211_PKT_HDR_ADDRESS3(d, *pkt_buf, na_info->a3);
SET_80211_PKT_HDR_DURATION(*pkt_buf, 0);
SET_80211_PKT_HDR_FRAGMENT_SEQUENCE(*pkt_buf, 0);
/* Frame Body */
p_na_body = (u8*)(*pkt_buf + MAC_HDR_LEN);
/* offset for security iv */
p_na_body += sec_hdr;
/* LLC header */
_os_mem_cpy(d, p_na_body, NSLLCHeader, 8);
/* IPv6 Header */
/* 1 . Information (4 bytes): 0x60 0x00 0x00 0x00 */
_os_mem_cpy(d, p_na_body+8, IPv6HeadInfo, 4);
/* 2 . payload length : 0x00 0x20 , NextProt : 0x3a (ICMPv6) , HopLim : 0xff */
_os_mem_cpy(d, p_na_body+12, IPv6HeadContx, 4);
/* 3 . SA : 16 bytes , DA : 16 bytes ( Fw will fill) */
_os_mem_set(d, p_na_body+16, 0, 32);
/* ICMPv6 */
/* 1. Type : 0x88 (NA) , Code : 0x00 , Checksum : 0x00 0x00 (Rsvd) , NAFlag: 0x60 0x00 0x00 0x00 ( Solicited , Override) */
_os_mem_cpy(d, p_na_body+48, ICMPv6Head, 8);
/* 2. TA : 16 bytes */
_os_mem_set(d, p_na_body+56, 0, 16);
/* ICMPv6 Target Link Layer address */
WriteLE1Byte(p_na_body+72, 0x02); /* type */
WriteLE1Byte(p_na_body+73, 0x01); /* Len: 1 unit of 8 octets */
_os_mem_set(d, p_na_body+74, 0, 6); /* Target Link Address */
return RTW_PHL_STATUS_SUCCESS;
}
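/*
 * Resulting frame-body layout of the NA packet built above, relative to
 * p_na_body (byte offsets summarized from the writes in this function):
 *   0..7    LLC/SNAP header for IPv6 (AA AA 03 00 00 00 86 DD)
 *   8..11   IPv6 version/traffic class/flow label (60 00 00 00)
 *   12..15  payload length 0x0020, next header 0x3a (ICMPv6), hop limit 0xff
 *   16..47  IPv6 source/destination addresses (zeroed, filled by FW)
 *   48..55  ICMPv6 NA header: type 0x88, code 0x00, checksum 0x0000,
 *           flags 0x60000000 (Solicited, Override)
 *   56..71  target address (zeroed)
 *   72..79  target link-layer address option: type 0x02, len 0x01, MAC zeroed
 */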
static enum rtw_phl_status
_phl_pkt_ofld_construct_arp_rsp(struct pkt_ofld_obj *pkt, u8 **pkt_buf,
u16 *len, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_arp_rsp_info *arp_rsp_info)
{
void *d = phl_to_drvpriv(pkt->phl_info);
u8* p_arp_rsp_body;
u8 ARPLLCHeader[8] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x06};
u8 sec_hdr = arp_rsp_info->sec_hdr;
/* size estimation */
/* sMacHdrLng + LLC header(8) + arp element(28) */
*len = MAC_HDR_LEN + sec_hdr + 8 + 28;
*pkt_buf = _os_mem_alloc(d, *len);
if (*pkt_buf == NULL)
return RTW_PHL_STATUS_RESOURCE;
_os_mem_set(d, *pkt_buf, 0, *len);
SET_80211_PKT_HDR_FRAME_CONTROL(*pkt_buf, 0);
SET_80211_PKT_HDR_TYPE_AND_SUBTYPE(*pkt_buf, TYPE_DATA_FRAME);
SET_80211_PKT_HDR_TO_DS(*pkt_buf, 1);
if (sec_hdr != 0)
SET_80211_PKT_HDR_PROTECT(*pkt_buf, 1);
SET_80211_PKT_HDR_ADDRESS1(d, *pkt_buf, arp_rsp_info->a1);
SET_80211_PKT_HDR_ADDRESS2(d, *pkt_buf, arp_rsp_info->a2);
SET_80211_PKT_HDR_ADDRESS3(d, *pkt_buf, arp_rsp_info->a3);
SET_80211_PKT_HDR_DURATION(*pkt_buf, 0);
SET_80211_PKT_HDR_FRAGMENT_SEQUENCE(*pkt_buf, 0);
/* Frame body */
p_arp_rsp_body = (u8*)(*pkt_buf + MAC_HDR_LEN);
/* offset for security iv */
p_arp_rsp_body += sec_hdr;
/* LLC header */
_os_mem_cpy(d, p_arp_rsp_body, ARPLLCHeader, 8);
/* ARP element */
p_arp_rsp_body += 8;
/* hardware type = 0x0001 (Ethernet) */
WriteLE2Byte(p_arp_rsp_body, 0x0100);
/* Protocol type = 0x0800 (IPv4) */
WriteLE2Byte(p_arp_rsp_body+2, 0x0008);
/* Hardware address length = 6 */
WriteLE1Byte(p_arp_rsp_body+4, 6);
/* Protocol address length = 4 */
WriteLE1Byte(p_arp_rsp_body+5, 4);
/* Operation = 0x0002 arp response */
WriteLE2Byte(p_arp_rsp_body+6, 0x0200);
/* Sender Mac Address */
_os_mem_cpy(d, p_arp_rsp_body+8, arp_rsp_info->a2, 6);
/* Sender IPv4 Address */
_os_mem_cpy(d, p_arp_rsp_body+14,
&(arp_rsp_info->host_ipv4_addr[0]), 4);
/* Target Mac Address */
_os_mem_cpy(d, p_arp_rsp_body+18, arp_rsp_info->a1, 6);
/* Target IPv4 Address */
_os_mem_cpy(d, p_arp_rsp_body+24,
&(arp_rsp_info->remote_ipv4_addr[0]), 4);
return RTW_PHL_STATUS_SUCCESS;
}
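/*
 * Resulting ARP element layout built above, relative to p_arp_rsp_body
 * after the 8-byte LLC/SNAP header (byte offsets summarized from the
 * writes in this function):
 *   0..1    hardware type 0x0001 (Ethernet)
 *   2..3    protocol type 0x0800 (IPv4)
 *   4       hardware address length 6
 *   5       protocol address length 4
 *   6..7    opcode 0x0002 (ARP reply)
 *   8..13   sender MAC address (a2)
 *   14..17  sender IPv4 address (host_ipv4_addr)
 *   18..23  target MAC address (a1)
 *   24..27  target IPv4 address (remote_ipv4_addr)
 */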
static enum rtw_phl_status
_phl_pkt_ofld_construct_eapol_key_data(struct pkt_ofld_obj *ofld_obj, u8 **pkt_buf,
u16 *len, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_eapol_key_info *eapol_key_info)
{
void *d = phl_to_drvpriv(ofld_obj->phl_info);
u8 *pkt = NULL;
u8 llc_hdr[] = {0xAA, 0xAA, 0x03};
u8 llc_oui[] = {0x00, 0x00, 0x00};
u8 llc_proto_id[] = {0x88, 0x8E};
u8 sec_hdr = eapol_key_info->sec_hdr;
u8 key_desc_ver = eapol_key_info->key_desc_ver;
*len = MAC_HDR_LEN+sec_hdr+LLC_LEN+AUTH_1X_HDR_LEN+EAPOLMSG_HDR_LEN;
*pkt_buf = _os_mem_alloc(d, *len);
if (*pkt_buf == NULL)
return RTW_PHL_STATUS_RESOURCE;
pkt = *pkt_buf;
_os_mem_set(d, pkt, 0, *len);
SET_80211_PKT_HDR_FRAME_CONTROL(pkt, 0);
SET_80211_PKT_HDR_TYPE_AND_SUBTYPE(pkt, TYPE_DATA_FRAME);
SET_80211_PKT_HDR_TO_DS(pkt, 1);
if (sec_hdr != 0)
SET_80211_PKT_HDR_PROTECT(*pkt_buf, 1);
SET_80211_PKT_HDR_ADDRESS1(d, pkt, eapol_key_info->a1); /* BSSID */
SET_80211_PKT_HDR_ADDRESS2(d, pkt, eapol_key_info->a2); /* SA */
SET_80211_PKT_HDR_ADDRESS3(d, pkt, eapol_key_info->a3); /* DA */
SET_80211_PKT_HDR_DURATION(pkt, 0);
SET_80211_PKT_HDR_FRAGMENT_SEQUENCE(pkt, 0);
pkt += MAC_HDR_LEN;
/* offset for security iv */
pkt += sec_hdr;
/* LLC */
SET_LLC_HDR(d, pkt, llc_hdr);
SET_LLC_OUI(d, pkt, llc_oui);
SET_LLC_PROTO_ID(d, pkt, llc_proto_id);
pkt += LLC_LEN;
/* 802.1x Auth hdr */
SET_AUTH_1X_PROTO_VER(pkt, LIB1X_EAPOL_VER);
SET_AUTH_1X_PKT_TYPE(pkt, LIB1X_TYPE_EAPOL_KEY);
SET_AUTH_1X_LENGTH(pkt, EAPOLMSG_HDR_LEN);
pkt += AUTH_1X_HDR_LEN;
/* EAPOL-KEY */
SET_EAPOLKEY_KEYDESC_TYPE(pkt, EAPOLKEY_KEYDESC_TYPE_RSN);
SET_EAPOLKEY_KEY_INFO(pkt, EAPOLKEY_SECURE|EAPOLKEY_KEYMIC|key_desc_ver);
/* SET_EAPOLKEY_REPLAY_CNT(d, pkt, eapol_key_info->replay_cnt); */
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_pkt_ofld_construct_sa_query_pkt(struct pkt_ofld_obj *ofld_obj, u8 **pkt_buf,
u16 *len, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_sa_query_info *sa_query_info)
{
void *d = phl_to_drvpriv(ofld_obj->phl_info);
u8 *pkt = NULL;
u8 sec_hdr = sa_query_info->sec_hdr;
*len = MAC_HDR_LEN+sec_hdr+SAQ_ACTION_LEN;
*pkt_buf = _os_mem_alloc(d, *len);
if (*pkt_buf == NULL)
return RTW_PHL_STATUS_RESOURCE;
pkt = *pkt_buf;
_os_mem_set(d, pkt, 0, *len);
SET_80211_PKT_HDR_FRAME_CONTROL(pkt, 0);
SET_80211_PKT_HDR_TYPE_AND_SUBTYPE(pkt, TYPE_ACTION_FRAME);
SET_80211_PKT_HDR_DURATION(pkt, 0);
if (sec_hdr != 0)
SET_80211_PKT_HDR_PROTECT(pkt, 1);
SET_80211_PKT_HDR_ADDRESS1(d, pkt, sa_query_info->a1); /* bssid */
SET_80211_PKT_HDR_ADDRESS2(d, pkt, sa_query_info->a2); /* sa */
SET_80211_PKT_HDR_ADDRESS3(d, pkt, sa_query_info->a3); /* da */
SET_80211_PKT_HDR_FRAGMENT_SEQUENCE(pkt, 0);
pkt += MAC_HDR_LEN;
/* offset for security iv */
pkt += sec_hdr;
SET_ACTION_FRAME_CATEGORY(pkt, ACT_TYPE_SA_QUERY);
SET_SAQ_ACTION_FIELD(pkt, 1);
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_pkt_ofld_construct_realwow_kapkt(struct pkt_ofld_obj *ofld_obj, u8 **pkt_buf,
u16 *len, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_realwow_kapkt_info *kapkt_info)
{
void *d = phl_to_drvpriv(ofld_obj->phl_info);
*len = kapkt_info->keep_alive_pkt_size;
*pkt_buf = _os_mem_alloc(d, *len);
if (*pkt_buf == NULL)
return RTW_PHL_STATUS_RESOURCE;
_os_mem_set(d, *pkt_buf, 0, *len);
_os_mem_cpy(d, *pkt_buf, kapkt_info, *len);
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_pkt_ofld_construct_realwow_ack(struct pkt_ofld_obj *ofld_obj, u8 **pkt_buf,
u16 *len, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_realwow_ack_info *ack_info)
{
void *d = phl_to_drvpriv(ofld_obj->phl_info);
*len = ack_info->ack_ptrn_size;
*pkt_buf = _os_mem_alloc(d, *len);
if (*pkt_buf == NULL)
return RTW_PHL_STATUS_RESOURCE;
_os_mem_set(d, *pkt_buf, 0, *len);
_os_mem_cpy(d, *pkt_buf, ack_info, *len);
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_pkt_ofld_construct_realwow_wp(struct pkt_ofld_obj *ofld_obj, u8 **pkt_buf,
u16 *len, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_realwow_wp_info *wake_info)
{
void *d = phl_to_drvpriv(ofld_obj->phl_info);
*len = wake_info->wakeup_ptrn_size;
*pkt_buf = _os_mem_alloc(d, *len);
if (*pkt_buf == NULL)
return RTW_PHL_STATUS_RESOURCE;
_os_mem_set(d, *pkt_buf, 0, *len);
_os_mem_cpy(d, *pkt_buf, wake_info, *len);
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_pkt_ofld_construct_packet(struct pkt_ofld_obj *ofld_obj, u16 macid,
u8 type, u8 **pkt_buf, u16 *len, void *buf)
{
struct rtw_phl_stainfo_t *phl_sta = rtw_phl_get_stainfo_by_macid(
ofld_obj->phl_info, macid);
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
switch(type) {
case PKT_TYPE_NULL_DATA:
status = _phl_pkt_ofld_construct_null_data(ofld_obj, pkt_buf,
len, phl_sta, (struct rtw_pkt_ofld_null_info *) buf);
break;
case PKT_TYPE_ARP_RSP:
status = _phl_pkt_ofld_construct_arp_rsp(ofld_obj, pkt_buf,
len, phl_sta, (struct rtw_pkt_ofld_arp_rsp_info *) buf);
break;
case PKT_TYPE_NDP:
status = _phl_pkt_ofld_construct_na(ofld_obj, pkt_buf,
len, phl_sta, (struct rtw_pkt_ofld_na_info *) buf);
break;
case PKT_TYPE_EAPOL_KEY:
status = _phl_pkt_ofld_construct_eapol_key_data(ofld_obj, pkt_buf,
len, phl_sta, (struct rtw_pkt_ofld_eapol_key_info *) buf);
break;
case PKT_TYPE_SA_QUERY:
status = _phl_pkt_ofld_construct_sa_query_pkt(ofld_obj, pkt_buf,
len, phl_sta, (struct rtw_pkt_ofld_sa_query_info *) buf);
break;
case PKT_TYPE_REALWOW_KAPKT:
status = _phl_pkt_ofld_construct_realwow_kapkt(ofld_obj, pkt_buf,
len, phl_sta, (struct rtw_pkt_ofld_realwow_kapkt_info *) buf);
break;
case PKT_TYPE_REALWOW_ACK:
status = _phl_pkt_ofld_construct_realwow_ack(ofld_obj, pkt_buf,
len, phl_sta, (struct rtw_pkt_ofld_realwow_ack_info *) buf);
break;
case PKT_TYPE_REALWOW_WP:
status = _phl_pkt_ofld_construct_realwow_wp(ofld_obj, pkt_buf,
len, phl_sta, (struct rtw_pkt_ofld_realwow_wp_info *) buf);
break;
case PKT_TYPE_PROBE_RSP:
case PKT_TYPE_PS_POLL:
case PKT_TYPE_QOS_NULL:
case PKT_TYPE_CTS2SELF:
default:
PHL_ERR("[PKT] packet type %s is not implemented.\n",
_phl_pkt_ofld_get_txt(type));
status = RTW_PHL_STATUS_FAILURE;
break;
}
if(status == RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,"%s:\n", _phl_pkt_ofld_get_txt(type));
debug_dump_data((u8 *)*pkt_buf, *len, "construct packet");
} else if(status == RTW_PHL_STATUS_RESOURCE) {
PHL_ERR("[PKT] %s: alloc memory failed.\n", __func__);
} else {
PHL_ERR("[PKT] %s: failed.\n", __func__);
}
return status;
}
static void
_phl_pkt_ofld_init_entry(struct pkt_ofld_entry *entry, u16 macid)
{
u8 idx;
INIT_LIST_HEAD(&entry->list);
entry->macid = macid;
for(idx = 0; idx < PKT_OFLD_TYPE_MAX; idx++) {
INIT_LIST_HEAD(&entry->pkt_info[idx].req_q);
entry->pkt_info[idx].id = NOT_USED;
entry->pkt_info[idx].req_cnt = 0;
}
}
static u8
_phl_pkt_ofld_is_entry_exist(struct pkt_ofld_obj *ofld_obj, u16 macid)
{
struct pkt_ofld_entry *pos = NULL;
phl_list_for_loop(pos, struct pkt_ofld_entry, &ofld_obj->entry_q, list) {
if (pos->macid == macid) {
PHL_ERR("[PKT] %s, mac id(%d) already in entry queue.\n",
__func__, macid);
return true;
}
}
return false;
}
static void
_phl_pkt_ofld_del_all_req(struct pkt_ofld_obj *ofld_obj,
struct pkt_ofld_info *pkt_info)
{
struct pkt_ofld_req *pos = NULL;
struct pkt_ofld_req *n = NULL;
phl_list_for_loop_safe(pos, n, struct pkt_ofld_req,
&pkt_info->req_q, list) {
_phl_pkt_ofld_del_req(ofld_obj, pkt_info, pos);
}
}
static void
_phl_pkt_ofld_del_ofld_type(struct pkt_ofld_obj *ofld_obj,
struct pkt_ofld_entry *entry)
{
u8 idx;
u8 id;
for(idx = 0; idx < PKT_OFLD_TYPE_MAX; idx++) {
if(_phl_pkt_ofld_is_pkt_ofld(&entry->pkt_info[idx])) {
id = entry->pkt_info[idx].id;
if(HAL_PKT_OFLD_DEL(ofld_obj, &id) !=
RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("[PKT] %s: delete pkt(%d) failed, id(%d).\n",
__func__, idx, id);
}
}
}
}
static void
_phl_pkt_ofld_reset_entry(struct pkt_ofld_obj *ofld_obj,
struct pkt_ofld_entry *entry)
{
u8 idx;
for(idx = 0; idx < PKT_OFLD_TYPE_MAX; idx++) {
entry->pkt_info[idx].id = NOT_USED;
_phl_pkt_ofld_del_all_req(ofld_obj, &entry->pkt_info[idx]);
}
}
static void
_phl_pkt_ofld_add_entry(struct pkt_ofld_obj *ofld_obj,
struct pkt_ofld_entry *entry, u16 macid)
{
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] New entry %p, mac id = %d\n", entry, macid);
_phl_pkt_ofld_init_entry(entry, macid);
list_add(&entry->list, &ofld_obj->entry_q);
ofld_obj->entry_cnt++;
if(rtw_hal_pkt_update_ids(ofld_obj->phl_info->hal, entry)
!= RTW_HAL_STATUS_SUCCESS) {
PHL_WARN("%s: init general id failed.\n", __func__);
}
}
static void
_phl_pkt_ofld_del_entry(struct pkt_ofld_obj *ofld_obj, struct pkt_ofld_entry *entry)
{
void *d = phl_to_drvpriv(ofld_obj->phl_info);
list_del(&entry->list);
ofld_obj->entry_cnt--;
_os_mem_free(d, entry, sizeof(*entry));
}
static struct pkt_ofld_entry *
_phl_pkt_ofld_get_entry(struct pkt_ofld_obj *ofld_obj, u16 macid)
{
struct pkt_ofld_entry *pos = NULL;
u8 find = false;
phl_list_for_loop(pos, struct pkt_ofld_entry, &ofld_obj->entry_q, list) {
if (pos->macid == macid) {
find = true;
break;
}
}
if (find) {
return pos;
} else {
return NULL;
}
}
static enum rtw_phl_status
_phl_pkt_ofld_req_type(struct pkt_ofld_obj *ofld_obj,
struct pkt_ofld_entry *entry, u8 type, void *buf)
{
enum rtw_phl_status phl_status;
enum rtw_hal_status hal_status;
struct pkt_ofld_info *pkt_info = &entry->pkt_info[type];
void *d = phl_to_drvpriv(ofld_obj->phl_info);
u8 *pkt_buf = NULL;
u16 len = 0;
if(_phl_pkt_ofld_is_pkt_ofld(pkt_info)) {
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] %s: type %s is already downloaded to FW.\n",
__func__, _phl_pkt_ofld_get_txt(type));
return RTW_PHL_STATUS_SUCCESS;
}
phl_status = _phl_pkt_ofld_construct_packet(ofld_obj, entry->macid, type,
&pkt_buf, &len, buf);
if(phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("[PKT] %s: construct packet type(%d) failed.\n",
__func__, type);
return RTW_PHL_STATUS_FAILURE;
}
hal_status = HAL_PKT_OFLD_ADD(ofld_obj, &pkt_info->id, pkt_buf, &len);
_os_mem_free(d, pkt_buf, len);
if(hal_status != RTW_HAL_STATUS_SUCCESS) {
pkt_info->id = NOT_USED;
PHL_ERR("[PKT] %s: add packet offload(%d) failed.\n", __func__,
pkt_info->id);
return RTW_PHL_STATUS_FAILURE;
}
hal_status = rtw_hal_pkt_update_ids(ofld_obj->phl_info->hal, entry);
if(hal_status != RTW_HAL_STATUS_SUCCESS) {
pkt_info->id = NOT_USED;
PHL_ERR("[PKT] %s: update id failed.\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_pkt_ofld_cancel_type(struct pkt_ofld_obj *ofld_obj,
struct pkt_ofld_entry *entry, u8 type)
{
struct pkt_ofld_info *pkt_info = &entry->pkt_info[type];
if(_phl_pkt_ofld_is_pkt_ofld(pkt_info) == false) {
PHL_ERR("[PKT] %s, type not offloaded to FW.\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
if((entry->pkt_info[type].req_cnt-1) != 0) {
return RTW_PHL_STATUS_SUCCESS;
}
if(HAL_PKT_OFLD_DEL(ofld_obj, &entry->pkt_info[type].id)
!= RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("[PKT] %s: delete id(%d) failed.\n", __func__,
entry->pkt_info[type].id);
return RTW_PHL_STATUS_FAILURE;
}
entry->pkt_info[type].id = NOT_USED;
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to create packet offload object */
enum rtw_phl_status phl_pkt_ofld_init(struct phl_info_t *phl_info)
{
void *d = phl_to_drvpriv(phl_info);
struct pkt_ofld_obj *ofld_obj;
ofld_obj = (struct pkt_ofld_obj *)_os_mem_alloc(d, sizeof(*ofld_obj));
if (ofld_obj == NULL)
return RTW_PHL_STATUS_RESOURCE;
phl_info->pkt_ofld = ofld_obj;
ofld_obj->phl_info = phl_info;
INIT_LIST_HEAD(&ofld_obj->entry_q);
ofld_obj->entry_cnt = 0;
ofld_obj->cur_seq = 0;
_os_mutex_init(d, &ofld_obj->mux);
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to free packet offload object */
void phl_pkt_ofld_deinit(struct phl_info_t *phl_info)
{
struct pkt_ofld_obj *ofld_obj = phl_info->pkt_ofld;
struct pkt_ofld_entry *pos = NULL;
struct pkt_ofld_entry *n = NULL;
void *d = phl_to_drvpriv(phl_info);
_os_mutex_lock(d, &ofld_obj->mux);
phl_list_for_loop_safe(pos, n, struct pkt_ofld_entry,
&ofld_obj->entry_q, list) {
_phl_pkt_ofld_reset_entry(ofld_obj, pos);
_phl_pkt_ofld_del_entry(ofld_obj, pos);
}
_os_mutex_unlock(d, &ofld_obj->mux);
_os_mutex_deinit(d, &ofld_obj->mux);
_os_mem_free(d, ofld_obj, sizeof(*ofld_obj));
}
/* For EXTERNAL application to reset all entry */
void phl_pkt_ofld_reset_all_entry(struct phl_info_t *phl_info)
{
struct pkt_ofld_obj *ofld_obj = phl_info->pkt_ofld;
struct pkt_ofld_entry *pos = NULL;
void *d = phl_to_drvpriv(phl_info);
_os_mutex_lock(d, &ofld_obj->mux);
phl_list_for_loop(pos, struct pkt_ofld_entry,
&ofld_obj->entry_q, list) {
_phl_pkt_ofld_reset_entry(ofld_obj, pos);
}
_os_mutex_unlock(d, &ofld_obj->mux);
}
/* For EXTERNAL application to add an entry for packet offload (expose)
* @phl: refer to rtw_phl_com_t
* @macid: the mac id of STA
*/
enum rtw_phl_status phl_pkt_ofld_add_entry(struct phl_info_t *phl_info, u16 macid)
{
struct pkt_ofld_obj *ofld_obj = phl_info->pkt_ofld;
void *d = phl_to_drvpriv(phl_info); /* avoid dereferencing ofld_obj before the NULL check below */
struct pkt_ofld_entry *entry = NULL;
if (ofld_obj == NULL) {
PHL_ERR("[PKT] %s: pkt_ofld_obj is NULL.\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
_os_mutex_lock(d, &ofld_obj->mux);
if (_phl_pkt_ofld_is_entry_exist(ofld_obj, macid)) {
_os_mutex_unlock(d, &ofld_obj->mux);
return RTW_PHL_STATUS_FAILURE;
}
entry = _os_mem_alloc(d, sizeof(*entry));
if (entry == NULL) {
PHL_ERR("[PKT] %s: alloc memory failed.\n", __func__);
_os_mutex_unlock(d, &ofld_obj->mux);
return RTW_PHL_STATUS_RESOURCE;
}
_phl_pkt_ofld_add_entry(ofld_obj, entry, macid);
_os_mutex_unlock(d, &ofld_obj->mux);
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to remove entry (expose)
* @phl: refer to rtw_phl_com_t
* @macid: the mac id of STA
*/
enum rtw_phl_status phl_pkt_ofld_del_entry(struct phl_info_t *phl_info, u16 macid)
{
struct pkt_ofld_obj *ofld_obj = phl_info->pkt_ofld;
void *d = phl_to_drvpriv(phl_info); /* avoid dereferencing ofld_obj before the NULL check below */
struct pkt_ofld_entry *entry = NULL;
if (ofld_obj == NULL) {
PHL_ERR("[PKT] %s: pkt_ofld_obj is NULL.\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
_os_mutex_lock(d, &ofld_obj->mux);
entry = _phl_pkt_ofld_get_entry(ofld_obj, macid);
if(entry == NULL) {
_os_mutex_unlock(d, &ofld_obj->mux);
PHL_ERR("[PKT] %s, mac id(%d) not found.\n", __func__, macid);
return RTW_PHL_STATUS_FAILURE;
}
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] Remove entry %p, mac id = %d\n", entry, macid);
_phl_pkt_ofld_del_ofld_type(ofld_obj, entry);
_phl_pkt_ofld_reset_entry(ofld_obj, entry);
_phl_pkt_ofld_del_entry(ofld_obj, entry);
_os_mutex_unlock(d, &ofld_obj->mux);
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to request packet offload to FW (expose)
* @phl: refer to rtw_phl_com_t
* @macid: the mac id of STA
* @type: The type of packet
* @token: The identifier (return to caller)
* @req_name: The function name of caller
*/
enum rtw_phl_status
phl_pkt_ofld_request(struct phl_info_t *phl_info, u16 macid, u8 type,
u32 *token, const char *req_name, void *buf)
{
struct pkt_ofld_obj *ofld_obj = phl_info->pkt_ofld;
void *d = phl_to_drvpriv(phl_info); /* avoid dereferencing ofld_obj before the NULL check below */
struct pkt_ofld_entry *entry = NULL;
struct pkt_ofld_req *req = NULL;
if (ofld_obj == NULL) {
PHL_ERR("[PKT] %s: pkt_ofld_obj is NULL.\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
_os_mutex_lock(d, &ofld_obj->mux);
entry = _phl_pkt_ofld_get_entry(ofld_obj, macid);
if(entry == NULL) {
_os_mutex_unlock(d, &ofld_obj->mux);
PHL_ERR("[PKT] %s, mac id(%d) not found.\n", __func__, macid);
return RTW_PHL_STATUS_FAILURE;
}
req = _phl_pkt_ofld_gen_req(ofld_obj, req_name);
if (req == NULL) {
_os_mutex_unlock(d, &ofld_obj->mux);
return RTW_PHL_STATUS_RESOURCE;
}
_phl_pkt_ofld_add_req(ofld_obj, &entry->pkt_info[type], req);
if(_phl_pkt_ofld_req_type(ofld_obj, entry, type, buf) !=
RTW_PHL_STATUS_SUCCESS) {
_phl_pkt_ofld_del_req(ofld_obj, &entry->pkt_info[type], req);
_os_mutex_unlock(d, &ofld_obj->mux);
return RTW_PHL_STATUS_FAILURE;
}
*token = req->token;
_os_mutex_unlock(d, &ofld_obj->mux);
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] Request: macid %d, pkt type %s, token %d.\n",
entry->macid, _phl_pkt_ofld_get_txt(type), *token);
return RTW_PHL_STATUS_SUCCESS;
}
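/*
 * Illustrative caller sketch (not part of the original driver source):
 * offloading a NULL-data keep-alive packet for a connected STA. The
 * variables d, macid, bssid, self_mac and wow_token are hypothetical
 * caller context, and the entry for macid is assumed to have been created
 * earlier via phl_pkt_ofld_add_entry(). The returned token is what the
 * caller later hands back to phl_pkt_ofld_cancel() to drop its reference.
 *
 *	struct rtw_pkt_ofld_null_info null_info = {0};
 *	u32 wow_token = 0;
 *
 *	_os_mem_cpy(d, null_info.a1, bssid, 6);
 *	_os_mem_cpy(d, null_info.a2, self_mac, 6);
 *	_os_mem_cpy(d, null_info.a3, bssid, 6);
 *	if (phl_pkt_ofld_request(phl_info, macid, PKT_TYPE_NULL_DATA,
 *	                         &wow_token, __func__, &null_info)
 *	    == RTW_PHL_STATUS_SUCCESS) {
 *		// ... later, when the offload is no longer needed ...
 *		phl_pkt_ofld_cancel(phl_info, macid, PKT_TYPE_NULL_DATA,
 *		                    &wow_token);
 *	}
 */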
/* For EXTERNAL application to cancel request (expose)
* @phl: refer to rtw_phl_com_t
* @macid: the mac id of STA
* @type: The type of packet
* @token: The identifier to get the request to be canceled
*/
enum rtw_phl_status phl_pkt_ofld_cancel(struct phl_info_t *phl_info,
u16 macid, u8 type, u32 *token)
{
struct pkt_ofld_obj *ofld_obj = phl_info->pkt_ofld;
void *d = phl_to_drvpriv(phl_info); /* avoid dereferencing ofld_obj before the NULL check below */
struct pkt_ofld_entry *entry = NULL;
struct pkt_ofld_req *req = NULL;
struct pkt_ofld_info *pkt_info = NULL;
if (ofld_obj == NULL) {
PHL_ERR("[PKT] %s: pkt_ofld_obj is NULL.\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
_os_mutex_lock(d, &ofld_obj->mux);
entry = _phl_pkt_ofld_get_entry(ofld_obj, macid);
if(entry == NULL) {
_os_mutex_unlock(d, &ofld_obj->mux);
PHL_ERR("[PKT] %s, macid(%d) not found.\n", __func__, macid);
return RTW_PHL_STATUS_FAILURE;
}
pkt_info = &entry->pkt_info[type];
req = _phl_pkt_ofld_get_req(ofld_obj, pkt_info, *token);
if(req == NULL) {
_os_mutex_unlock(d, &ofld_obj->mux);
PHL_ERR("[PKT] %s, token(%d) not found.\n", __func__, *token);
return RTW_PHL_STATUS_FAILURE;
}
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] Cancel: macid %d, type %s, token %d.\n",
entry->macid, _phl_pkt_ofld_get_txt(type), *token);
if(_phl_pkt_ofld_cancel_type(ofld_obj, entry, type)
!= RTW_PHL_STATUS_SUCCESS) {
_os_mutex_unlock(d, &ofld_obj->mux);
return RTW_PHL_STATUS_FAILURE;
}
_phl_pkt_ofld_del_req(ofld_obj, pkt_info, req);
_os_mutex_unlock(d, &ofld_obj->mux);
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to show current info (expose)
 * @phl_info: refer to struct phl_info_t
 */
void phl_pkt_ofld_show_info(struct phl_info_t *phl_info)
{
	struct pkt_ofld_obj *ofld_obj = phl_info->pkt_ofld;
	void *d = NULL;
	if (ofld_obj == NULL) {
		PHL_ERR("[PKT] %s: pkt_ofld_obj is NULL.\n", __func__);
		return;
	}
	d = phl_to_drvpriv(ofld_obj->phl_info);
_os_mutex_lock(d, &ofld_obj->mux);
_phl_pkt_ofld_dbg_dump(ofld_obj);
_os_mutex_unlock(d, &ofld_obj->mux);
}
/* For EXTERNAL application to get id (expose)
 * @phl_info: refer to struct phl_info_t
 * @macid: the mac id of STA
 * @type: The type of packet
 */
u8 phl_pkt_ofld_get_id(struct phl_info_t *phl_info, u16 macid, u8 type)
{
	struct pkt_ofld_obj *ofld_obj = phl_info->pkt_ofld;
	void *d = NULL;
	struct pkt_ofld_entry *entry = NULL;
	struct pkt_ofld_info *pkt_info = NULL;
	if (ofld_obj == NULL) {
		PHL_ERR("[PKT] %s: pkt_ofld_obj is NULL.\n", __func__);
		return RTW_PHL_STATUS_FAILURE;
	}
	/* only dereference ofld_obj after the NULL check above */
	d = phl_to_drvpriv(ofld_obj->phl_info);
	_os_mutex_lock(d, &ofld_obj->mux);
entry = _phl_pkt_ofld_get_entry(ofld_obj, macid);
if(entry == NULL) {
_os_mutex_unlock(d, &ofld_obj->mux);
PHL_ERR("[PKT] %s, macid(%d) not found.\n", __func__, macid);
return RTW_PHL_STATUS_FAILURE;
}
pkt_info = &entry->pkt_info[type];
PHL_TRACE(COMP_PHL_PKTOFLD, _PHL_INFO_,
"[PKT] Get id: macid %d, pkt type %s, id %d.\n",
entry->macid, _phl_pkt_ofld_get_txt(type),
pkt_info->id);
_os_mutex_unlock(d, &ofld_obj->mux);
return pkt_info->id;
}
const char *phl_get_pkt_ofld_str(enum pkt_ofld_type type)
{
switch(type) {
case PKT_TYPE_NULL_DATA:
return "PKT_TYPE_NULL_DATA";
case PKT_TYPE_ARP_RSP:
return "PKT_TYPE_ARP_RSP";
case PKT_TYPE_NDP:
return "PKT_TYPE_NDP";
case PKT_TYPE_EAPOL_KEY:
return "PKT_TYPE_EAPOL_KEY";
case PKT_TYPE_SA_QUERY:
return "PKT_TYPE_SA_QUERY";
default:
return "UNKNOWN_PKT_TYPE";
}
}
| 2301_81045437/rtl8852be | phl/phl_pkt_ofld.c | C | agpl-3.0 | 29,840 |
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_PKT_OFLD_H__
#define __PHL_PKT_OFLD_H__
#define TYPE_DATA_FRAME 0x08
#define TYPE_ACTION_FRAME 0xD0
#define TYPE_NULL_FRAME 0x48
#define HDR_OFFSET_FRAME_CONTROL 0
#define HDR_OFFSET_DURATION 2
#define HDR_OFFSET_ADDRESS1 4
#define HDR_OFFSET_ADDRESS2 10
#define HDR_OFFSET_ADDRESS3 16
#define HDR_OFFSET_SEQUENCE 22
#define HDR_OFFSET_ADDRESS4 24
#define SET_80211_PKT_HDR_FRAME_CONTROL(_hdr, _val) \
WriteLE2Byte(_hdr, _val)
#define SET_80211_PKT_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
WriteLE1Byte(_hdr, _val)
#define SET_80211_PKT_HDR_PROTOCOL_VERSION(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 0, 2, _val)
#define SET_80211_PKT_HDR_TYPE(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 2, 2, _val)
#define SET_80211_PKT_HDR_SUBTYPE(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 4, 4, _val)
#define SET_80211_PKT_HDR_TO_DS(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
#define SET_80211_PKT_HDR_FROM_DS(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 9, 1, _val)
#define SET_80211_PKT_HDR_MORE_FRAG(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 10, 1, _val)
#define SET_80211_PKT_HDR_RETRY(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 11, 1, _val)
#define SET_80211_PKT_HDR_PWR_MGNT(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
#define SET_80211_PKT_HDR_MORE_DATA(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 13, 1, _val)
#define SET_80211_PKT_HDR_PROTECT(_hdr, _val) \
SET_BITS_TO_LE_2BYTE(_hdr, 14, 1, _val)
#define SET_80211_PKT_HDR_DURATION(_hdr, _val) \
WriteLE2Byte((u8 *)(_hdr)+HDR_OFFSET_DURATION, _val)
#define SET_80211_PKT_HDR_ADDRESS1(_h, _hdr, _val) \
_os_mem_cpy(_h, _hdr+HDR_OFFSET_ADDRESS1, _val, MAC_ALEN)
#define SET_80211_PKT_HDR_ADDRESS2(_h, _hdr, _val) \
_os_mem_cpy(_h, _hdr+HDR_OFFSET_ADDRESS2, _val, MAC_ALEN)
#define SET_80211_PKT_HDR_ADDRESS3(_h, _hdr, _val) \
_os_mem_cpy(_h, _hdr+HDR_OFFSET_ADDRESS3, _val, MAC_ALEN)
#define SET_80211_PKT_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
WriteLE2Byte((u8 *)(_hdr)+HDR_OFFSET_SEQUENCE, _val)
#define NOT_USED 0xFF
#define NULL_PACKET_LEN 24
#define MAC_HDR_LEN 24
/* 11w SA-Query */
#define SAQ_ACTION_LEN 4
#define SAQ_OFFSET_ACTION 1
#define ACT_TYPE_SA_QUERY 8
#define SET_ACTION_FRAME_CATEGORY(_ptr, _val) WriteLE1Byte(_ptr, _val)
#define SET_SAQ_ACTION_FIELD(_ptr, _val) WriteLE1Byte(_ptr+SAQ_OFFSET_ACTION, _val)
#define LLC_LEN 8
#define LLC_OFFSET_HDR 0
#define LLC_OFFSET_OUI 3
#define LLC_OFFSET_PROTO_ID 6
#define SET_LLC_HDR(_h, _ptr, _val) _os_mem_cpy(_h, _ptr+LLC_OFFSET_HDR, _val, 3);
#define SET_LLC_OUI(_h, _ptr, _val) _os_mem_cpy(_h, _ptr+LLC_OFFSET_OUI, _val, 3);
#define SET_LLC_PROTO_ID(_h, _ptr, _val) _os_mem_cpy(_h, _ptr+LLC_OFFSET_PROTO_ID, _val, 2);
#define AUTH_1X_HDR_LEN 4
#define AUTH_1X_OFFSET_PROTO_VER 0
#define AUTH_1X_OFFSET_PKT_TYPE 1
#define AUTH_1X_OFFSET_LENGTH 2
#define LIB1X_EAPOL_VER 1 /* 00000001B */
#define LIB1X_TYPE_EAPOL_EAPPKT 0 /* 0000 0000B */
#define LIB1X_TYPE_EAPOL_START 1 /* 0000 0001B */
#define LIB1X_TYPE_EAPOL_LOGOFF 2 /* 0000 0010B */
#define LIB1X_TYPE_EAPOL_KEY 3 /* 0000 0011B */
#define LIB1X_TYPE_EAPOL_ENCASFALERT 4 /* 0000 0100B */
#define SET_AUTH_1X_PROTO_VER(_ptr, _val) WriteLE1Byte(_ptr+AUTH_1X_OFFSET_PROTO_VER, _val)
#define SET_AUTH_1X_PKT_TYPE(_ptr, _val) WriteLE1Byte(_ptr+AUTH_1X_OFFSET_PKT_TYPE, _val)
#define SET_AUTH_1X_LENGTH(_ptr, _val) WriteBE2Byte(_ptr+AUTH_1X_OFFSET_LENGTH, _val)
#define EAPOLMSG_HDR_LEN 95
#define EAPOLKEY_OFFSET_KEYDESC_TYPE 0
#define EAPOLKEY_OFFSET_KEY_INFO 1
#define EAPOLKEY_OFFSET_REPLAY_CNT 5
#define EAPOLKEY_KEYDESC_TYPE_RSN 2
#define EAPOLKEY_KEYDESC_VER_1 1
#define EAPOLKEY_KEYDESC_VER_2 2
#define EAPOLKEY_KEYDESC_VER_3 3
#define EAPOLKEY_KEYMIC BIT(8)
#define EAPOLKEY_SECURE BIT(9)
#define SET_EAPOLKEY_KEYDESC_TYPE(_ptr, _val) WriteLE1Byte(_ptr+EAPOLKEY_OFFSET_KEYDESC_TYPE, _val)
#define SET_EAPOLKEY_KEY_INFO(_ptr, _val) WriteBE2Byte(_ptr+EAPOLKEY_OFFSET_KEY_INFO, _val)
#define SET_EAPOLKEY_REPLAY_CNT(_h, _ptr, _val) _os_mem_cpy(_h, _ptr+EAPOLKEY_OFFSET_REPLAY_CNT, _val, 8);
#define HAL_PKT_OFLD_ADD(_pkt, _id, _pkt_buf, _len) \
rtw_hal_pkt_ofld((_pkt)->phl_info->hal, _id, PKT_OFLD_ADD, _pkt_buf, _len)
#define HAL_PKT_OFLD_READ(_pkt, _id) \
rtw_hal_pkt_ofld((_pkt)->phl_info->hal, _id, PKT_OFLD_READ, NULL, NULL)
#define HAL_PKT_OFLD_DEL(_pkt, _id) \
rtw_hal_pkt_ofld((_pkt)->phl_info->hal, _id, PKT_OFLD_DEL, NULL, NULL)
struct pkt_ofld_obj {
_os_mutex mux;
struct phl_info_t *phl_info;
struct list_head entry_q;
u32 entry_cnt;
u32 cur_seq;
};
struct pkt_ofld_req {
struct list_head list;
u32 token;
char *req_name;
u32 req_name_len;
};
struct rtw_pkt_ofld_null_info {
u8 a1[MAC_ADDRESS_LENGTH];
u8 a2[MAC_ADDRESS_LENGTH];
u8 a3[MAC_ADDRESS_LENGTH];
};
struct rtw_pkt_ofld_arp_rsp_info {
u8 a1[MAC_ADDRESS_LENGTH];
u8 a2[MAC_ADDRESS_LENGTH];
u8 a3[MAC_ADDRESS_LENGTH];
u8 host_ipv4_addr[IPV4_ADDRESS_LENGTH];
u8 remote_ipv4_addr[IPV4_ADDRESS_LENGTH];
u8 sec_hdr;
};
struct rtw_pkt_ofld_na_info {
u8 a1[MAC_ADDRESS_LENGTH];
u8 a2[MAC_ADDRESS_LENGTH];
u8 a3[MAC_ADDRESS_LENGTH];
u8 sec_hdr;
};
struct rtw_pkt_ofld_eapol_key_info {
u8 a1[MAC_ADDRESS_LENGTH];
u8 a2[MAC_ADDRESS_LENGTH];
u8 a3[MAC_ADDRESS_LENGTH];
u8 sec_hdr;
u8 key_desc_ver;
u8 replay_cnt[8];
};
struct rtw_pkt_ofld_sa_query_info {
u8 a1[MAC_ADDRESS_LENGTH];
u8 a2[MAC_ADDRESS_LENGTH];
u8 a3[MAC_ADDRESS_LENGTH];
u8 sec_hdr;
};
struct rtw_pkt_ofld_realwow_kapkt_info {
u8 keep_alive_pkt_ptrn[MAX_REALWOW_KCP_SIZE];
u16 keep_alive_pkt_size;
};
struct rtw_pkt_ofld_realwow_ack_info {
u8 ack_ptrn[MAX_REALWOW_PAYLOAD];
u16 ack_ptrn_size;
};
struct rtw_pkt_ofld_realwow_wp_info {
u8 wakeup_ptrn[MAX_REALWOW_PAYLOAD];
u16 wakeup_ptrn_size;
u8 wakeupsecnum; /* ? */
};
/* init api */
enum rtw_phl_status phl_pkt_ofld_init(struct phl_info_t *phl_info);
void phl_pkt_ofld_deinit(struct phl_info_t *phl_info);
void phl_pkt_ofld_reset_all_entry(struct phl_info_t *phl_info);
enum rtw_phl_status phl_pkt_ofld_add_entry(struct phl_info_t *phl_info, u16 macid);
enum rtw_phl_status phl_pkt_ofld_del_entry(struct phl_info_t *phl_info, u16 macid);
#define RTW_PHL_PKT_OFLD_REQ(_phl, _macid, _type, _seq, _buf) \
phl_pkt_ofld_request(_phl, _macid, _type, _seq, __func__, _buf)
enum rtw_phl_status phl_pkt_ofld_request(struct phl_info_t *phl_info,
u16 macid, u8 type,
u32 *token, const char *req_name,
void *buf);
enum rtw_phl_status phl_pkt_ofld_cancel(struct phl_info_t *phl_info,
u16 macid, u8 type, u32 *token);
void phl_pkt_ofld_show_info(struct phl_info_t *phl_info);
u8 phl_pkt_ofld_get_id(struct phl_info_t *phl_info, u16 macid, u8 type);
const char *phl_get_pkt_ofld_str(enum pkt_ofld_type type);
#endif /* __PHL_PKT_OFLD_H__ */
| 2301_81045437/rtl8852be | phl/phl_pkt_ofld.h | C | agpl-3.0 | 7,390 |
/******************************************************************************
*
* Copyright(c) 2021 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_PS_C_
#include "phl_headers.h"
#ifdef CONFIG_POWER_SAVE
const char *phl_ps_op_mode_to_str(u8 op_mode)
{
switch (op_mode) {
case PS_OP_MODE_DISABLED:
return "Disabled";
case PS_OP_MODE_FORCE_ENABLED:
return "Force Enabled";
case PS_OP_MODE_AUTO:
return "Auto";
default:
return "-";
}
}
const char *phl_ps_ps_mode_to_str(u8 ps_mode)
{
switch (ps_mode) {
case PS_MODE_IPS:
return "IPS";
case PS_MODE_LPS:
return "LPS";
default:
return "NONE";
}
}
#define case_pwr_lvl(src) \
case PS_PWR_LVL_##src: return #src
const char *phl_ps_pwr_lvl_to_str(u8 pwr_lvl)
{
switch (pwr_lvl) {
case_pwr_lvl(PWROFF);
case_pwr_lvl(PWR_GATED);
case_pwr_lvl(CLK_GATED);
case_pwr_lvl(RF_OFF);
case_pwr_lvl(PWRON);
case_pwr_lvl(MAX);
default:
return "Undefined";
}
}
u8 phl_ps_judge_pwr_lvl(u8 ps_cap, u8 ps_mode, u8 ps_en)
{
if (!ps_en)
return PS_PWR_LVL_PWRON;
if (ps_mode == PS_MODE_IPS) {
if (ps_cap & PS_CAP_PWR_OFF)
return PS_PWR_LVL_PWROFF;
else if (ps_cap & PS_CAP_PWR_GATED)
return PS_PWR_LVL_PWR_GATED;
else if (ps_cap & PS_CAP_CLK_GATED)
return PS_PWR_LVL_CLK_GATED;
else if (ps_cap & PS_CAP_RF_OFF)
return PS_PWR_LVL_RF_OFF;
else
return PS_PWR_LVL_PWROFF; /* ips default support power off */
} else if (ps_mode == PS_MODE_LPS) {
if (ps_cap & PS_CAP_PWR_GATED)
return PS_PWR_LVL_PWR_GATED;
else if (ps_cap & PS_CAP_CLK_GATED)
return PS_PWR_LVL_CLK_GATED;
else if (ps_cap & PS_CAP_RF_OFF)
return PS_PWR_LVL_RF_OFF;
else if (ps_cap & PS_CAP_PWRON)
return PS_PWR_LVL_PWRON;
else
return PS_PWR_LVL_PWRON; /* lps default support protocol */
} else {
PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS], %s(): unknown ps mode!\n", __func__);
}
return PS_PWR_LVL_PWRON;
}
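/*
 * Worked example (capability bits chosen for illustration only): with
 * ps_en == true, ps_mode == PS_MODE_LPS and
 * ps_cap == (PS_CAP_CLK_GATED | PS_CAP_RF_OFF), the if/else chain above
 * skips PS_CAP_PWR_GATED, matches PS_CAP_CLK_GATED first and returns
 * PS_PWR_LVL_CLK_GATED, i.e. the deepest level the capability bits allow
 * for that mode.
 */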
static void _ps_ntfy_before_pwr_cfg(struct phl_info_t *phl_info, u8 cur_pwr_lvl, u8 req_pwr_lvl)
{
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): \n", __func__);
if (cur_pwr_lvl == PS_PWR_LVL_PWRON) { /* enter ps */
if (req_pwr_lvl == PS_PWR_LVL_PWROFF) {
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_WL_OFF);
#endif
#if defined(CONFIG_PCI_HCI) && defined(RTW_WKARD_DYNAMIC_LTR)
phl_ltr_sw_ctrl_ntfy(phl_info->phl_com, false);
#endif
} else if (req_pwr_lvl <= PS_PWR_LVL_RF_OFF) {
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_FW_CTRL);
#endif
#if defined(CONFIG_PCI_HCI) && defined(RTW_WKARD_DYNAMIC_LTR)
if (req_pwr_lvl == PS_PWR_LVL_PWR_GATED)
phl_ltr_sw_ctrl_ntfy(phl_info->phl_com, false);
#endif
}
}
}
static void _ps_ntfy_after_pwr_cfg(struct phl_info_t *phl_info, u8 cur_pwr_lvl, u8 req_pwr_lvl, u8 cfg_ok)
{
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): \n", __func__);
if (cur_pwr_lvl > req_pwr_lvl) { /* enter ps */
if (!cfg_ok) { /* fail */
if (req_pwr_lvl == PS_PWR_LVL_PWROFF) { /* ips */
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_WL_ON);
#endif
#if defined(CONFIG_PCI_HCI) && defined(RTW_WKARD_DYNAMIC_LTR)
phl_ltr_sw_ctrl_ntfy(phl_info->phl_com, true);
#endif
} else { /* lps */
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_LPS_WL_ON);
#endif
#if defined(CONFIG_PCI_HCI) && defined(RTW_WKARD_DYNAMIC_LTR)
phl_ltr_sw_ctrl_ntfy(phl_info->phl_com, true);
#endif
}
}
} else { /* leave ps */
if (cfg_ok) { /* ok */
if (cur_pwr_lvl == PS_PWR_LVL_PWROFF) { /* ips */
if (req_pwr_lvl == PS_PWR_LVL_PWRON) {
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_WL_ON);
#endif
#if defined(CONFIG_PCI_HCI) && defined(RTW_WKARD_DYNAMIC_LTR)
phl_ltr_sw_ctrl_ntfy(phl_info->phl_com, true);
#endif
}
} else { /* lps */
if (req_pwr_lvl == PS_PWR_LVL_PWRON) {
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_LPS_WL_ON);
#endif
#if defined(CONFIG_PCI_HCI) && defined(RTW_WKARD_DYNAMIC_LTR)
phl_ltr_sw_ctrl_ntfy(phl_info->phl_com, true);
#endif
}
}
}
}
}
enum rtw_phl_status
phl_ps_cfg_pwr_lvl(struct phl_info_t *phl_info, u8 ps_mode, u8 cur_pwr_lvl, u8 req_pwr_lvl)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): from %s to %s.\n",
__func__, phl_ps_pwr_lvl_to_str(cur_pwr_lvl), phl_ps_pwr_lvl_to_str(req_pwr_lvl));
if (cur_pwr_lvl == req_pwr_lvl)
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS], %s(): pwr lvl is not changed!\n", __func__);
_ps_ntfy_before_pwr_cfg(phl_info, cur_pwr_lvl, req_pwr_lvl);
hstatus = rtw_hal_ps_pwr_lvl_cfg(phl_info->phl_com, phl_info->hal,
req_pwr_lvl);
_ps_ntfy_after_pwr_cfg(phl_info, cur_pwr_lvl, req_pwr_lvl,
(hstatus == RTW_HAL_STATUS_SUCCESS ? true : false));
return (hstatus == RTW_HAL_STATUS_SUCCESS) ? RTW_PHL_STATUS_SUCCESS : RTW_PHL_STATUS_FAILURE;
}
static void _ps_ntfy_before_lps_proto_cfg(struct phl_info_t *phl_info, u8 lps_en)
{
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): \n", __func__);
if (lps_en) { /* enter lps */
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_LPS_WL_ON);
#endif
}
}
static void
_ps_ntfy_after_lps_proto_cfg(struct phl_info_t *phl_info, u8 lps_en, u8 cfg_ok)
{
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): \n", __func__);
if (lps_en) { /* enter lps */
if (!cfg_ok) { /* fail */
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_WL_ON);
#endif
}
} else { /* leave lps */
if (cfg_ok) { /* ok */
#ifdef CONFIG_BTCOEX
rtw_hal_btc_radio_state_ntfy(phl_info->hal, BTC_RFCTRL_WL_ON);
#endif
}
}
}
enum rtw_phl_status
phl_ps_lps_cfg(struct phl_info_t *phl_info, struct ps_cfg *cfg, u8 lps_en)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
	struct rtw_hal_lps_info lps_info;
	struct rtw_wifi_role_t *wrole = NULL; /* used by phl_snd_cmd_ntfy_ps() below, so declared outside the workaround ifdef */
#ifdef RTW_WKARD_LPS_ROLE_CONFIG
	struct rtw_phl_stainfo_t *sta = NULL;
	sta = rtw_phl_get_stainfo_by_macid(phl_info, cfg->macid);
	if (sta != NULL) {
		wrole = sta->wrole;
	} else {
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS], %s(): cannot get sta!\n", __func__);
	}
#endif
if (RTW_PHL_STATUS_SUCCESS != phl_snd_cmd_ntfy_ps(phl_info, wrole, lps_en)) {
status = RTW_PHL_STATUS_FAILURE;
return status;
}
if (lps_en) {
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): enter lps, macid %d.\n", __func__, cfg->macid);
#ifdef RTW_WKARD_LPS_ROLE_CONFIG
phl_role_suspend_unused_role(phl_info, wrole);
#endif
} else {
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): leave lps, macid %d.\n", __func__, cfg->macid);
#ifdef RTW_WKARD_LPS_ROLE_CONFIG
phl_role_recover_unused_role(phl_info, wrole);
#endif
}
lps_info.lps_en = lps_en;
lps_info.macid = cfg->macid;
lps_info.listen_bcn_mode = cfg->listen_bcn_mode;
lps_info.awake_interval = cfg->awake_interval;
lps_info.smart_ps_mode = cfg->smart_ps_mode;
_ps_ntfy_before_lps_proto_cfg(phl_info, lps_en);
if (rtw_hal_ps_lps_cfg(phl_info->hal, &lps_info) != RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): config lps fail.\n", __func__);
rtw_hal_notification(phl_info->hal, MSG_EVT_DBG_TX_DUMP, HW_PHY_0);
status = RTW_PHL_STATUS_FAILURE;
}
_ps_ntfy_after_lps_proto_cfg(phl_info, lps_en, (status == RTW_PHL_STATUS_SUCCESS ? true : false));
return status;
}
static enum rtw_phl_status _lps_enter_proto_cfg(struct phl_info_t *phl_info, struct ps_cfg *cfg)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct rtw_pkt_ofld_null_info null_info = {0};
struct rtw_phl_stainfo_t *phl_sta = NULL;
void *d = phl_to_drvpriv(phl_info);
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): \n", __func__);
rtw_hal_cfg_fw_ps_log(phl_info->hal, true);
phl_sta = rtw_phl_get_stainfo_by_macid(phl_info, cfg->macid);
if (phl_sta == NULL)
return RTW_PHL_STATUS_FAILURE;
_os_mem_cpy(d, &(null_info.a1[0]), &(phl_sta->mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(d,&(null_info.a2[0]), &(phl_sta->wrole->mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(d, &(null_info.a3[0]), &(phl_sta->mac_addr[0]),
MAC_ADDRESS_LENGTH);
status = RTW_PHL_PKT_OFLD_REQ(phl_info, cfg->macid,
PKT_TYPE_NULL_DATA, cfg->token, &null_info);
if (status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): add null pkt ofld fail!\n", __func__);
return status;
}
status = phl_ps_lps_cfg(phl_info, cfg, true);
if (status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): config lps fail!\n", __func__);
return status;
}
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status _lps_leave_proto_cfg(struct phl_info_t *phl_info, struct ps_cfg *cfg)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): \n", __func__);
status = phl_ps_lps_cfg(phl_info, cfg, false);
if (status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): config lps fail!\n", __func__);
return status;
}
status = phl_pkt_ofld_cancel(phl_info, cfg->macid,
PKT_TYPE_NULL_DATA, cfg->token);
if (status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): del null pkt ofld fail!\n", __func__);
return status;
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_ps_lps_proto_cfg(struct phl_info_t *phl_info, struct ps_cfg *cfg, bool lps_en)
{
if (lps_en)
return _lps_enter_proto_cfg(phl_info, cfg);
else
return _lps_leave_proto_cfg(phl_info, cfg);
}
enum rtw_phl_status phl_ps_lps_enter(struct phl_info_t *phl_info, struct ps_cfg *cfg)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
if (cfg->proto_cfg) {
status = phl_ps_lps_proto_cfg(phl_info, cfg, true);
if (status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): config lps protocol fail!\n", __func__);
return status;
}
}
if (cfg->pwr_cfg) {
status = phl_ps_cfg_pwr_lvl(phl_info, cfg->ps_mode, cfg->cur_pwr_lvl, cfg->pwr_lvl);
if (status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): config lps pwr lvl fail!\n", __func__);
return status;
}
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_ps_lps_leave(struct phl_info_t *phl_info, struct ps_cfg *cfg)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
if (cfg->pwr_cfg) {
status = phl_ps_cfg_pwr_lvl(phl_info, cfg->ps_mode, cfg->cur_pwr_lvl, cfg->pwr_lvl);
if (status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): config lps pwr lvl fail!\n", __func__);
return status;
}
}
if (cfg->proto_cfg) {
status = phl_ps_lps_proto_cfg(phl_info, cfg, false);
if (status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_PS, _PHL_ERR_, "[PS], %s(): config lps protocol fail!\n", __func__);
return status;
}
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status phl_ps_ips_enter(struct phl_info_t *phl_info, struct ps_cfg *cfg)
{
return phl_ps_cfg_pwr_lvl(phl_info, cfg->ps_mode, cfg->cur_pwr_lvl, cfg->pwr_lvl);
}
enum rtw_phl_status phl_ps_ips_leave(struct phl_info_t *phl_info, struct ps_cfg *cfg)
{
return phl_ps_cfg_pwr_lvl(phl_info, cfg->ps_mode, cfg->cur_pwr_lvl, cfg->pwr_lvl);
}
enum rtw_phl_status
phl_ps_enter_ps(struct phl_info_t *phl_info, struct ps_cfg *cfg)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): ps mode(%s), pwr lvl(%s), macid(%d), token(0x%x), proto_cfg(%d), pwr_cfg(%d)\n",
__func__, phl_ps_ps_mode_to_str(cfg->ps_mode), phl_ps_pwr_lvl_to_str(cfg->pwr_lvl),
cfg->macid, (cfg->token == NULL) ? 0xFF : *cfg->token, cfg->proto_cfg, cfg->pwr_cfg);
if (cfg->ps_mode == PS_MODE_LPS)
status = phl_ps_lps_enter(phl_info, cfg);
else if (cfg->ps_mode == PS_MODE_IPS)
status = phl_ps_ips_enter(phl_info, cfg);
return status;
}
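/*
 * Illustrative fill-in of struct ps_cfg for an LPS entry (a sketch only;
 * the surrounding variables phl_info, ps_cap and macid, and the chosen
 * field values, are assumptions for illustration, not taken from the
 * driver).
 */
#if 0
	struct ps_cfg cfg = {0};
	u32 token = 0;
	cfg.ps_mode = PS_MODE_LPS;
	cfg.macid = macid;
	cfg.token = &token;		/* filled by the null-data pkt offload */
	cfg.proto_cfg = true;		/* configure the LPS protocol part */
	cfg.pwr_cfg = true;		/* and the power level part */
	cfg.cur_pwr_lvl = PS_PWR_LVL_PWRON;
	cfg.pwr_lvl = phl_ps_judge_pwr_lvl(ps_cap, PS_MODE_LPS, true);
	if (phl_ps_enter_ps(phl_info, &cfg) != RTW_PHL_STATUS_SUCCESS)
		PHL_TRACE(COMP_PHL_PS, _PHL_WARNING_, "[PS], enter lps failed\n");
#endif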
enum rtw_phl_status
phl_ps_leave_ps(struct phl_info_t *phl_info, struct ps_cfg *cfg)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_, "[PS], %s(): current ps mode(%s), pwr lvl(%s), macid(%d), token(0x%x), proto_cfg(%d), pwr_cfg(%d)\n",
__func__, phl_ps_ps_mode_to_str(cfg->ps_mode), phl_ps_pwr_lvl_to_str(cfg->pwr_lvl),
cfg->macid, (cfg->token == NULL) ? 0xFF : *cfg->token, cfg->proto_cfg, cfg->pwr_cfg);
if (cfg->ps_mode == PS_MODE_LPS)
status = phl_ps_lps_leave(phl_info, cfg);
else if (cfg->ps_mode == PS_MODE_IPS)
status = phl_ps_ips_leave(phl_info, cfg);
return status;
}
#endif
| 2301_81045437/rtl8852be | phl/phl_ps.c | C | agpl-3.0 | 13,174 |
/******************************************************************************
*
* Copyright(c) 2021 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_PS_H_
#define _PHL_PS_H_
#ifdef CONFIG_POWER_SAVE
enum phl_ps_mode {
PS_MODE_NONE,
PS_MODE_LPS,
PS_MODE_IPS
};
/* use to configure specific pwr mode along with pwr lvl and others */
struct ps_cfg {
/* common */
u8 ps_mode;
u8 cur_pwr_lvl;
u8 pwr_lvl;
/* lps */
bool pwr_cfg; /* whether to configure pwr lvl */
bool proto_cfg; /* whether to configure protocol */
u16 macid;
u32 *token;
u8 listen_bcn_mode;
u8 awake_interval;
u8 smart_ps_mode;
};
#define _get_ps_cap(_phl_info) (&(_phl_info)->phl_com->dev_cap.ps_cap)
#define _get_ps_sw_cap(_phl_info) (&(_phl_info)->phl_com->dev_sw_cap.ps_cap)
const char *phl_ps_op_mode_to_str(u8 op_mode);
const char *phl_ps_ps_mode_to_str(u8 ps_mode);
const char *phl_ps_pwr_lvl_to_str(u8 pwr_lvl);
u8 phl_ps_judge_pwr_lvl(u8 ps_cap, u8 ps_mode, u8 ps_en);
enum rtw_phl_status phl_ps_lps_cfg(struct phl_info_t *phl_info, struct ps_cfg *cfg, u8 lps_en);
enum rtw_phl_status phl_ps_cfg_pwr_lvl(struct phl_info_t *phl_info, u8 ps_mode, u8 cur_pwr_lvl, u8 req_pwr_lvl);
enum rtw_phl_status phl_ps_enter_ps(struct phl_info_t *phl_info, struct ps_cfg *cfg);
enum rtw_phl_status phl_ps_leave_ps(struct phl_info_t *phl_info, struct ps_cfg *cfg);
#endif
#endif /* _PHL_PS_H_ */
| 2301_81045437/rtl8852be | phl/phl_ps.h | C | agpl-3.0 | 1,882 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
#include "phl_chnlplan.h"
#include "phl_country.h"
#include "phl_regulation_6g.h"
extern const struct regulatory_domain_mapping rdmap[MAX_RD_MAP_NUM];
extern const struct chdef_2ghz chdef2g[MAX_CHDEF_2GHZ];
extern const struct chdef_5ghz chdef5g[MAX_CHDEF_5GHZ];
extern const struct country_domain_mapping cdmap[MAX_COUNTRY_NUM];
/*
* @ Function description
* Convert 2 ghz channels from bit definition and then fill to
* struct rtw_regulation_channel *ch array[] and
* *ch_cnt will also be calculated.
*
* @ parameter
* *rg : internal regulatory information
* *ch_cnt : final converted 2ghz channel numbers.
* *rch : converted channels will be filled here.
 *	ch : 2 ghz bit definitions
 *	passive : 2 ghz passive bit definitions
*
*/
static void _convert_ch2g(struct rtw_regulation *rg, u32 *ch_cnt,
struct rtw_regulation_channel *rch, u16 ch, u16 passive)
{
u8 i = 0, property = 0;
u32 shift = 0, cnt = 0;
PHL_INFO("[REGU], convert 2 ghz channels\n");
for (i = 0; i < MAX_CH_NUM_2GHZ; i++) {
property = 0;
shift = (1 << i);
if (ch & shift) {
rch[*ch_cnt].band = BAND_ON_24G;
rch[*ch_cnt].channel = (u8)(i + 1);
if (passive & shift)
property |= CH_PASSIVE;
rch[*ch_cnt].property = property;
(*ch_cnt)++;
PHL_INFO("[REGU], ch: %d%s\n", (i + 1),
((property & CH_PASSIVE) ? ", passive" : " " ));
cnt++;
}
}
PHL_INFO("[REGU], converted channels : %d\n", cnt);
}
static enum rtw_regulation_status _chnlplan_update_2g(
struct rtw_regulation *rg, const struct freq_plan *f)
{
const struct chdef_2ghz *chdef = NULL;
struct rtw_regulation_chplan_group *plan = NULL;
u16 i = 0, ch = 0, passive = 0;
if (!f)
return REGULATION_FAILURE;
if (f->regulation >= REGULATION_MAX)
return REGULATION_FAILURE;
for (i = 0; i < MAX_CHDEF_2GHZ; i++) {
if (f->ch_idx == chdef2g[i].idx) {
chdef = &chdef2g[i];
break;
}
}
if (!chdef)
return REGULATION_FAILURE;
rg->ch_idx2g = f->ch_idx;
rg->regulation_2g = f->regulation;
plan = &rg->chplan[FREQ_GROUP_2GHZ];
plan->cnt = 0;
ch = ((chdef->support_ch[1] << 8) | (chdef->support_ch[0]));
passive = ((chdef->passive[1] << 8) | (chdef->passive[0]));
_convert_ch2g(rg, &plan->cnt, plan->ch, ch, passive);
PHL_INFO("[REGU], 2 GHz, total channel = %d\n", plan->cnt);
return REGULATION_SUCCESS;
}
static void _get_5ghz_ch_info(const struct chdef_5ghz *chdef,
u8 group, u16 *ch, u16 *passive, u16 *dfs, u8 *max_num, u8 *ch_start)
{
switch (group) {
case FREQ_GROUP_5GHZ_BAND1:
*ch = chdef->support_ch_b1;
*passive = chdef->passive_b1;
*dfs = chdef->dfs_b1;
*max_num = MAX_CH_NUM_BAND1;
*ch_start = 36;
break;
case FREQ_GROUP_5GHZ_BAND2:
*ch = chdef->support_ch_b2;
*passive = chdef->passive_b2;
*dfs = chdef->dfs_b2;
*max_num = MAX_CH_NUM_BAND2;
*ch_start = 52;
break;
case FREQ_GROUP_5GHZ_BAND3:
*ch = ((chdef->support_ch_b3[1] << 8) |
(chdef->support_ch_b3[0]));
*passive = ((chdef->passive_b3[1] << 8) |
(chdef->passive_b3[0]));
*dfs = ((chdef->dfs_b3[1] << 8) |
(chdef->dfs_b3[0])) ;
*max_num = MAX_CH_NUM_BAND3;
*ch_start = 100;
break;
case FREQ_GROUP_5GHZ_BAND4:
*ch = chdef->support_ch_b4;
*passive = chdef->passive_b4;
*dfs = chdef->dfs_b4;
*max_num = MAX_CH_NUM_BAND4;
*ch_start = 149;
break;
default:
*ch = 0;
*passive = 0;
*dfs = 0;
*max_num = 0;
*ch_start = 0;
break;
}
}
/*
* @ Function description
* Convert 5 ghz channels from bit definition and then fill to
* struct rtw_regulation_channel *ch array[] and
* *ch_cnt will also be calculated.
*
* @ parameter
* band_5g : 1~4 (5g band-1 ~ 5g band-4)
* *rg : internal regulatory information
 *	*ch_cnt : final converted 5 ghz channel numbers.
 *	*rch : converted channels will be filled here.
 *	ch : 5 ghz band channel bit definitions
 *	passive : 5 ghz band passive bit definitions
 *	dfs : 5 ghz band dfs bit definitions
* max_num : maximum channel numbers of the 5 ghz band.
* ch_start : start channel index of the 5 ghz band.
*/
static void _convert_ch5g(u8 band_5g, struct rtw_regulation *rg,
u32 *ch_cnt, struct rtw_regulation_channel *rch,
u16 ch, u16 passive, u16 dfs, u8 max_num, u8 ch_start)
{
u16 i = 0;
u32 shift = 0;
u8 property = 0;
u32 cnt = 0;
PHL_INFO("[REGU], convert 5ghz band-%d channels, from %d, ch=0x%x, passive = 0x%x, dfs=0x%x \n",
band_5g, ch_start, ch, passive, dfs);
for (i = 0; i < max_num; i++) {
shift = (1 << i);
if (ch & shift) {
property = 0;
rch[*ch_cnt].band = BAND_ON_5G;
rch[*ch_cnt].channel = (u8)(ch_start + (i * 4));
if (passive & shift)
property |= CH_PASSIVE;
if (dfs & shift)
property |= CH_DFS;
rch[*ch_cnt].property = property;
PHL_INFO("[REGU], ch: %d%s%s \n",
rch[*ch_cnt].channel,
((property & CH_PASSIVE) ? ", passive" : ""),
((property & CH_DFS) ? ", dfs" : ""));
(*ch_cnt)++;
cnt++;
}
}
PHL_INFO("[REGU], converted channels : %d\n", cnt);
}
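/*
 * Worked example (bit values chosen for illustration only): for 5 GHz
 * band-1 ch_start is 36, so bit i of "ch" maps to channel 36 + 4 * i.
 * A bit definition of ch = 0x5 (bits 0 and 2 set) therefore converts to
 * channels 36 and 44; if passive = 0x4 as well, channel 44 is additionally
 * tagged with CH_PASSIVE.
 */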
static enum rtw_regulation_status _chnlplan_update_5g(
struct rtw_regulation *rg, const struct freq_plan *f)
{
const struct chdef_5ghz *chdef = NULL;
struct rtw_regulation_chplan_group *plan = NULL;
u8 group = FREQ_GROUP_5GHZ_BAND1;
u8 max_num = 0, ch_start = 0;
u16 i = 0, ch = 0, passive = 0, dfs = 0;
u32 total = 0;
if (!f)
return REGULATION_FAILURE;
if (f->regulation >= REGULATION_MAX)
return REGULATION_FAILURE;
for (i = 0; i < MAX_CHDEF_5GHZ; i++) {
if (f->ch_idx == chdef5g[i].idx) {
chdef = &chdef5g[i];
break;
}
}
if (!chdef)
return REGULATION_FAILURE;
rg->ch_idx5g = f->ch_idx;
rg->regulation_5g = f->regulation;
for (i = 0; i < 4; i++) {
group = (u8)(i + FREQ_GROUP_5GHZ_BAND1);
plan = &rg->chplan[group];
plan->cnt = 0;
_get_5ghz_ch_info(chdef, group,
&ch, &passive, &dfs, &max_num, &ch_start);
_convert_ch5g((u8)(i + 1), rg, &plan->cnt, plan->ch,
ch, passive, dfs, max_num, ch_start);
total += plan->cnt;
}
PHL_INFO("[REGU], 5 GHz, total channel = %d\n", total);
return REGULATION_SUCCESS;
}
static enum rtw_regulation_status _regulatory_domain_update(
struct rtw_regulation *rg, u8 did, enum regulation_rsn reason)
{
enum rtw_regulation_status status = REGULATION_SUCCESS;
const struct freq_plan *plan_2g = NULL;
const struct freq_plan *plan_5g = NULL;
plan_2g = &rdmap[did].freq_2g;
plan_5g = &rdmap[did].freq_5g;
rg->domain.code = rdmap[did].domain_code;
rg->domain.reason = reason;
status = _chnlplan_update_2g(rg, plan_2g);
if (status != REGULATION_SUCCESS)
return status;
status = _chnlplan_update_5g(rg, plan_5g);
if (status != REGULATION_SUCCESS)
return status;
return status;
}
static void _get_group_chplan(struct rtw_regulation *rg,
struct rtw_regulation_chplan_group *group,
struct rtw_regulation_chplan *plan)
{
u32 i = 0;
u8 dfs = 0;
for (i = 0; i < group->cnt; i++) {
dfs = ((group->ch[i].property & CH_DFS) ? 1 : 0);
if ((group->ch[i].channel) &&
(!dfs || ((rg->capability & CAPABILITY_DFS) && dfs))) {
plan->ch[plan->cnt].band =
group->ch[i].band;
plan->ch[plan->cnt].channel =
group->ch[i].channel;
plan->ch[plan->cnt].property =
group->ch[i].property;
plan->cnt++;
}
}
}
static u8 _domain_index(u8 domain)
{
u8 i = 0;
for (i = 0; i < MAX_RD_MAP_NUM; i++) {
if (domain == rdmap[i].domain_code) {
return i;
}
}
return MAX_RD_MAP_NUM;
}
static enum rtw_regulation_status _get_chnlplan(struct rtw_regulation *rg,
enum rtw_regulation_query type,
struct rtw_regulation_chplan *plan)
{
struct rtw_regulation_chplan_group *group = NULL;
if (rg->domain.code == INVALID_DOMAIN_CODE)
return REGULATION_INVALID_DOMAIN;
plan->cnt = 0;
/* 2ghz */
if (rg->capability & CAPABILITY_2GHZ) {
if (type == REGULQ_CHPLAN_FULL ||
type == REGULQ_CHPLAN_2GHZ_5GHZ ||
type == REGULQ_CHPLAN_2GHZ) {
group = &rg->chplan[FREQ_GROUP_2GHZ];
_get_group_chplan(rg, group, plan);
}
}
/* 5ghz */
if (rg->capability & CAPABILITY_5GHZ) {
/* band1 */
if (type == REGULQ_CHPLAN_FULL ||
type == REGULQ_CHPLAN_2GHZ_5GHZ ||
type == REGULQ_CHPLAN_5GHZ_ALL ||
type == REGULQ_CHPLAN_5GHZ_BAND1) {
group = &rg->chplan[FREQ_GROUP_5GHZ_BAND1];
_get_group_chplan(rg, group, plan);
}
/* band2 */
if (type == REGULQ_CHPLAN_FULL ||
type == REGULQ_CHPLAN_2GHZ_5GHZ ||
type == REGULQ_CHPLAN_5GHZ_ALL ||
type == REGULQ_CHPLAN_5GHZ_BAND2) {
group = &rg->chplan[FREQ_GROUP_5GHZ_BAND2];
_get_group_chplan(rg, group, plan);
}
/* band3 */
if (type == REGULQ_CHPLAN_FULL ||
type == REGULQ_CHPLAN_2GHZ_5GHZ ||
type == REGULQ_CHPLAN_5GHZ_ALL ||
type == REGULQ_CHPLAN_5GHZ_BAND3) {
group = &rg->chplan[FREQ_GROUP_5GHZ_BAND3];
_get_group_chplan(rg, group, plan);
}
/* band4 */
if (type == REGULQ_CHPLAN_FULL ||
type == REGULQ_CHPLAN_2GHZ_5GHZ ||
type == REGULQ_CHPLAN_5GHZ_ALL ||
type == REGULQ_CHPLAN_5GHZ_BAND4) {
group = &rg->chplan[FREQ_GROUP_5GHZ_BAND4];
_get_group_chplan(rg, group, plan);
}
}
#ifdef CONFIG_6GHZ
regu_get_chnlplan_6g(rg, type, plan);
#endif
return REGULATION_SUCCESS;
}
static bool _valid_property(u8 property, u8 reject)
{
u8 i = 0;
/* accept all property */
if (!reject)
return true;
/* check if ch property rejected */
for (i = 0; i < 8; i++) {
if ((BIT(i) & property) & reject)
return false;
}
return true;
}
static void _filter_chnlplan(void *d,
struct rtw_regulation_chplan *plan,
struct rtw_chlist *filter)
{
struct rtw_regulation_chplan inplan = {0};
u32 i = 0, j = 0, k = 0;
if (!d || !plan || !filter)
return;
if (plan->cnt < filter->cnt)
return;
_os_mem_cpy(d, &inplan, plan, sizeof(struct rtw_regulation_chplan));
/*
* generate output chplan
 * ex: filter : {1, 6}, inplan : {1, 6, 6, 11}, output => {1, 6, 6}
*/
plan->cnt = 0;
for (i = 0; i < filter->cnt; i++) {
for (j = 0; j < inplan.cnt; j++) {
if ((filter->ch[i].band == inplan.ch[j].band) &&
(filter->ch[i].ch == inplan.ch[j].channel)) {
plan->ch[k].band = inplan.ch[j].band;
plan->ch[k].channel = inplan.ch[j].channel;
plan->ch[k].property = inplan.ch[j].property;
k++;
plan->cnt++;
}
}
}
}
static bool _regulation_valid(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
void *d = NULL;
bool valid = false;
if (!phl)
return false;
rg = &phl_info->regulation;
if (!rg->init)
return false;
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
valid = rg->valid;
_os_spinunlock(d, &rg->lock, _bh, NULL);
return valid;
}
static bool _query_channel(struct rtw_regulation *rg,
enum band_type band, u16 channel,
struct rtw_regulation_channel *ch)
{
struct rtw_regulation_chplan_group *plan = NULL;
u32 i = 0, j = 0;
if ((BAND_2GHZ(band) && !(rg->capability & CAPABILITY_2GHZ)) ||
(BAND_5GHZ(band) && !(rg->capability & CAPABILITY_5GHZ)) ||
(BAND_6GHZ(band) && !(rg->capability & CAPABILITY_6GHZ)))
return false;
for (i = FREQ_GROUP_2GHZ; i < FREQ_GROUP_MAX; i++) {
plan = &rg->chplan[i];
for (j = 0; j < plan->cnt; j++) {
if (channel == plan->ch[j].channel) {
ch->band = plan->ch[j].band;
ch->channel = plan->ch[j].channel;
ch->property = plan->ch[j].property;
return true;
}
}
}
return false;
}
static void _display_chplan(struct rtw_regulation_chplan *plan)
{
u32 i = 0;
for (i = 0; i < plan->cnt; i++) {
PHL_INFO("[REGU], %d, %shz: ch %d%s%s%s\n", (i + 1),
((plan->ch[i].band == BAND_ON_24G) ? "2g" :
((plan->ch[i].band == BAND_ON_5G) ? "5g" :
((plan->ch[i].band == BAND_ON_6G) ? "6g" : ""))),
(plan->ch[i].channel),
((plan->ch[i].property & CH_PASSIVE) ?
", passive" : ""),
((plan->ch[i].property & CH_DFS) ? ", dfs" : ""),
((plan->ch[i].property & CH_PSC) ? ", psc" : ""));
}
}
static void _phl_regulation_send_msg(struct phl_info_t *phl_info, u8 evt_id)
{
struct phl_msg msg = {0};
msg.inbuf = NULL;
msg.inlen = 0;
msg.band_idx = HW_BAND_0;
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_REGU);
SET_MSG_EVT_ID_FIELD(msg.msg_id, evt_id);
if (RTW_PHL_STATUS_SUCCESS != phl_msg_hub_send(phl_info, NULL, &msg))
PHL_ERR("[REGULATION] sending message failed (evt_id: %u) \n", evt_id);
}
static void _history_log(struct rtw_regulation *rg, u8 domain, u8 reason)
{
rg->history[rg->history_cnt].code = domain;
rg->history[rg->history_cnt].reason = reason;
rg->history_cnt++;
if (rg->history_cnt >= MAX_HISTORY_NUM)
rg->history_cnt = 0;
}
static void _get_5ghz_udef_ch_info(struct rtw_user_def_chplan *udef,
u8 group, u16 *ch, u16 *passive, u16 *dfs, u8 *max_num, u8 *ch_start)
{
switch (group) {
case FREQ_GROUP_5GHZ_BAND1:
*ch = (u16)udef->ch5g & 0xf;
*passive = (u16)udef->passive5g & 0xf;
*dfs = (u16)udef->dfs5g & 0xf;
*max_num = MAX_CH_NUM_BAND1;
*ch_start = 36;
break;
case FREQ_GROUP_5GHZ_BAND2:
*ch = (u16)((udef->ch5g & 0xf0) >> 4);
*passive = (u16)((udef->passive5g & 0xf0) >> 4);
*dfs = (u16)((udef->dfs5g & 0xf0) >> 4);
*max_num = MAX_CH_NUM_BAND2;
*ch_start = 52;
break;
case FREQ_GROUP_5GHZ_BAND3:
*ch = (u16)((udef->ch5g & 0xfff00) >> 8);
*passive = (u16)((udef->passive5g & 0xfff00) >> 8);
*dfs = (u16)((udef->dfs5g & 0xfff00) >> 8);
*max_num = MAX_CH_NUM_BAND3;
*ch_start = 100;
break;
case FREQ_GROUP_5GHZ_BAND4:
*ch = (u16)((udef->ch5g & 0xff00000) >> 20);
*passive = (u16)((udef->passive5g & 0xff00000) >> 20);
*dfs = (u16)((udef->dfs5g & 0xff00000) >> 20);
*max_num = MAX_CH_NUM_BAND4;
*ch_start = 149;
break;
default:
*ch = 0;
*passive = 0;
*dfs = 0;
*max_num = 0;
*ch_start = 0;
break;
}
}
/*
* @ Function description
* Reset regulatory info for non-specific country
*/
static void _reset_for_non_specific_country(struct rtw_regulation *rg)
{
/* reset country */
rg->country[0] = 0;
rg->country[1] = 0;
/* reset TPO */
rg->tpo = TPO_NA;
/* default support all */
rg->support_mode |= (SUPPORT_11B | SUPPORT_11G | SUPPORT_11N |
SUPPORT_11A | SUPPORT_11AC | SUPPORT_11AX);
}
/*
* @ Function description
* Set user defined channel plans
*
* @ parameter
* struct rtw_user_def_chplan *udef : user defined channels, bit definition
*
* @ return :
* true : if set successfully
* false : failed to set
*
*/
bool rtw_phl_set_user_def_chplan(void *phl, struct rtw_user_def_chplan *udef)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
struct rtw_regulation_chplan_group *plan = NULL;
u8 max_num = 0, ch_start = 0;
u16 ch = 0, passive = 0, dfs = 0;
u8 group = FREQ_GROUP_5GHZ_BAND1;
void *d = NULL;
u32 i = 0;
if (!phl || !udef)
return false;
rg = &phl_info->regulation;
if (!rg->init)
return false;
if (rg->domain.code != RSVD_DOMAIN) {
PHL_INFO("[REGU], Only reserved domain can set udef channel plan \n");
return false;
}
PHL_INFO("[REGU], set udef channel plan, ch2g:0x%x, ch5g:0x%x\n",
udef->ch2g, udef->ch5g);
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
rg->regulation_2g = (u8)udef->regulatory_idx;
rg->regulation_5g = (u8)udef->regulatory_idx;
rg->tpo = udef->tpo;
/* 2 ghz */
plan = &rg->chplan[FREQ_GROUP_2GHZ];
plan->cnt = 0;
ch = udef->ch2g;
passive = udef->passive2g;
_convert_ch2g(rg, &plan->cnt, plan->ch, ch, passive);
PHL_INFO("[REGU], 2 GHz, total channel = %d\n", plan->cnt);
/* 5 ghz */
for (i = 0; i < 4; i++) {
group = (u8)(i + FREQ_GROUP_5GHZ_BAND1);
plan = &rg->chplan[group];
plan->cnt = 0;
_get_5ghz_udef_ch_info(udef, group,
&ch, &passive, &dfs, &max_num, &ch_start);
_convert_ch5g((u8)(i + 1), rg, &plan->cnt, plan->ch,
ch, passive, dfs, max_num, ch_start);
}
_os_spinunlock(d, &rg->lock, _bh, NULL);
return true;
}
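/*
 * Worked example of the user-defined 5 GHz bit layout (values chosen for
 * illustration; the band split follows _get_5ghz_udef_ch_info() above):
 * band-1 is taken from ch5g[3:0], band-2 from ch5g[7:4], band-3 from
 * ch5g[19:8] and band-4 from ch5g[27:20].  So udef->ch5g = 0x0010000f
 * would enable the band-1 channels starting at 36 (36/40/44/48) plus the
 * first band-4 channel (149).
 */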
/*
* @ Function description
* Check if domain is valid or not
*
* @ parameter
* domain : domain code to query
*
* @ return :
* true : if domain code exists in data base
* false : invalid domain code
*
*/
bool rtw_phl_valid_regulation_domain(u8 domain)
{
if (domain == RSVD_DOMAIN)
return true;
if (_domain_index(domain) >= MAX_RD_MAP_NUM)
return false;
return true;
}
/*
* @ Function description
* Set regulatory domain code
*
* @ parameter
* phl : struct phl_info_t *
* domain : domain code
* reason : why
*
* @ return :
* true : set domain successfully
* false : set fail
*
*/
bool rtw_phl_regulation_set_domain(void *phl, u8 domain,
enum regulation_rsn reason)
{
enum rtw_regulation_status status = REGULATION_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
void *d = NULL;
u8 did = MAX_RD_MAP_NUM;
PHL_INFO("[REGU], set domain code = 0x%x, reason = 0x%x\n",
domain, reason);
if (!phl_info)
return false;
rg = &phl_info->regulation;
if (!rg->init)
return false;
if (!rtw_phl_valid_regulation_domain(domain))
return false;
did = _domain_index(domain);
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
_history_log(rg, domain, reason);
if (domain == RSVD_DOMAIN) {
rg->domain.code = RSVD_DOMAIN;
rg->domain.reason = reason;
status = REGULATION_SUCCESS;
} else
status = _regulatory_domain_update(rg, did, reason);
if (status == REGULATION_SUCCESS) {
_reset_for_non_specific_country(rg);
rg->valid = true;
} else {
rg->valid = false;
rg->invalid_cnt++;
}
_os_spinunlock(d, &rg->lock, _bh, NULL);
PHL_INFO("[REGU], domain code update status = 0x%x\n", status);
if (status == REGULATION_SUCCESS) {
_phl_regulation_send_msg(phl_info, MSG_EVT_REGU_SET_DOMAIN);
#ifdef CONFIG_6GHZ
regu_set_domain_6g(phl, 0x7f, reason);
#endif
return true;
} else {
return false;
}
}
/*
* @ Function description
* Set regulation by 2bytes country code
*
* @ parameter
* phl : struct phl_info_t *
* country : 2 bytes char
* reason : why
*
* @ return :
* true : set country/domain successfully
* false : set fail
*
*/
bool rtw_phl_regulation_set_country(void *phl, char *country,
enum regulation_rsn reason)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
void *d = NULL;
u32 i = 0;
PHL_INFO("[REGU], set country code = \"%c%c\", reason = 0x%x\n",
country[0], country[1], reason);
if (!phl_info)
return false;
d = phl_to_drvpriv(phl_info);
rg = &phl_info->regulation;
if (!rg->init)
return false;
if (rg->domain.code == RSVD_DOMAIN)
return false;
for (i = 0; i < MAX_COUNTRY_NUM; i++) {
if (cdmap[i].char2[0] == country[0] &&
cdmap[i].char2[1] == country[1] ) {
if (!rtw_phl_regulation_set_domain(phl,
cdmap[i].domain_code, reason))
return false;
_os_spinlock(d, &rg->lock, _bh, NULL);
rg->country[0] = country[0];
rg->country[1] = country[1];
rg->tpo = cdmap[i].tpo;
rg->support_mode = 0;
if(cdmap[i].support & BIT(0))
rg->support_mode |= (SUPPORT_11B | SUPPORT_11G | SUPPORT_11N);
if(cdmap[i].support & BIT(1))
rg->support_mode |= (SUPPORT_11A);
if(cdmap[i].support & BIT(2))
rg->support_mode |= (SUPPORT_11AC);
if(cdmap[i].support & BIT(3))
rg->support_mode |= (SUPPORT_11AX);
_os_spinunlock(d, &rg->lock, _bh, NULL);
return true;
}
}
PHL_INFO("[REGU], country mismatch !!\n");
return false;
}
/*
* @ Function description
 *	Set regulation capability
*
* @ parameter
* phl : struct phl_info_t *
* capability : enum rtw_regulation_capability
*
* @ return :
* true : set capability successfully
* false : set capability fail
*
*/
bool rtw_phl_regulation_set_capability(void *phl,
enum rtw_regulation_capability capability)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
void *d = NULL;
PHL_INFO("[REGU], set capability = 0x%x \n", capability);
if (!phl_info)
return false;
rg = &phl_info->regulation;
if (!rg->init)
return false;
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
rg->capability = capability;
_os_spinunlock(d, &rg->lock, _bh, NULL);
PHL_INFO("[REGU], set capability = 0x%x successfully !!\n",
rg->capability);
return true;
}
/*
* @ Function description
* Query current regulation channel plan
*
* @ parameter
* phl : struct phl_info_t *
* type : enum rtw_regulation_query, different query type
* filter : struct rtw_chlist *, used to filter regulation channels
* plan : struct rtw_regulation_chplan *, query result will be filled here
* - result will be the intersection of regulation channel plan and
* the filter channels.
*
* @ return :
* true : regulation query successfully, caller can check result
* by input parameter *plan.
* false : regulation query fail
*
*/
bool rtw_phl_regulation_query_chplan(
void *phl, enum rtw_regulation_query type,
struct rtw_chlist *filter,
struct rtw_regulation_chplan *plan)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
enum rtw_regulation_status status = REGULATION_FAILURE;
void *d = NULL;
if (!phl || !plan)
return false;
if (!_regulation_valid(phl))
return false;
rg = &phl_info->regulation;
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
switch (type) {
case REGULQ_CHPLAN_FULL:
case REGULQ_CHPLAN_2GHZ:
case REGULQ_CHPLAN_5GHZ_ALL:
case REGULQ_CHPLAN_5GHZ_BAND1:
case REGULQ_CHPLAN_5GHZ_BAND2:
case REGULQ_CHPLAN_5GHZ_BAND3:
case REGULQ_CHPLAN_5GHZ_BAND4:
case REGULQ_CHPLAN_6GHZ_UNII5:
case REGULQ_CHPLAN_6GHZ_UNII6:
case REGULQ_CHPLAN_6GHZ_UNII7:
case REGULQ_CHPLAN_6GHZ_UNII8:
case REGULQ_CHPLAN_6GHZ:
case REGULQ_CHPLAN_6GHZ_PSC:
case REGULQ_CHPLAN_2GHZ_5GHZ:
status = _get_chnlplan(rg, type, plan);
if (filter)
_filter_chnlplan(d, plan, filter);
break;
default:
break;
}
_os_spinunlock(d, &rg->lock, _bh, NULL);
if (status == REGULATION_SUCCESS) {
/* _display_chplan(plan); */
return true;
}
else
return false;
}
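/*
 * Illustrative usage sketch (not part of the original driver; the local
 * variable names and the chosen query type are assumptions for
 * illustration only).
 */
#if 0
	struct rtw_regulation_chplan plan = {0};
	u32 i;
	/* query the combined 2 GHz + 5 GHz plan without an extra filter */
	if (rtw_phl_regulation_query_chplan(phl, REGULQ_CHPLAN_2GHZ_5GHZ,
					    NULL, &plan)) {
		for (i = 0; i < plan.cnt; i++)
			PHL_INFO("ch %d, property 0x%x\n",
				 plan.ch[i].channel, plan.ch[i].property);
	}
#endif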
/*
* @ Function description
* Query specific regulation channel plan by domain code
*
* @ parameter
* domain : domain code
* plan : struct rtw_regulation_chplan *, query result will be filled here
*
* @ return :
* true : regulation query successfully, caller can check result
* by input parameter *plan.
* false : regulation query fail
*
*/
bool rtw_phl_query_specific_chplan(void *phl, u8 domain,
struct rtw_regulation_chplan *plan)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
const struct chdef_2ghz *chdef2 = NULL;
const struct chdef_5ghz *chdef5 = NULL;
struct rtw_regulation *rg = NULL;
u8 did = MAX_RD_MAP_NUM;
u8 idx2g = INVALID_CHDEF;
u8 idx5g = INVALID_CHDEF;
u16 i = 0, ch = 0, passive = 0, dfs = 0;
u8 group = FREQ_GROUP_5GHZ_BAND1;
u8 max_num = 0, ch_start = 0;
if (!plan)
return false;
plan->cnt = 0;
PHL_INFO("[REGU], query specific channel plan for domain : 0x%x!!\n",
domain);
if (!rtw_phl_valid_regulation_domain(domain))
return false;
/* find channel definition for 2 ghz & 5 ghz */
did = _domain_index(domain);
idx2g = rdmap[did].freq_2g.ch_idx;
for (i = 0; i < MAX_CHDEF_2GHZ; i++) {
if (idx2g == chdef2g[i].idx) {
chdef2 = &chdef2g[i];
}
}
idx5g = rdmap[did].freq_5g.ch_idx;
for (i = 0; i < MAX_CHDEF_5GHZ; i++) {
if (idx5g == chdef5g[i].idx) {
chdef5 = &chdef5g[i];
}
}
/* when regulatory domain & capability is set, check regulatory capability setting first */
if (_regulation_valid(phl)) {
rg = &phl_info->regulation;
if (!(rg->capability & CAPABILITY_2GHZ))
chdef2 = NULL;
if (!(rg->capability & CAPABILITY_5GHZ))
chdef5 = NULL;
}
/* 2ghz */
if (chdef2) {
ch = ((chdef2->support_ch[1] << 8) |
(chdef2->support_ch[0]));
passive = ((chdef2->passive[1] << 8) |
(chdef2->passive[0]));
_convert_ch2g(rg, &plan->cnt, plan->ch, ch, passive);
}
/* 5ghz */
if (chdef5) {
for (i = 0; i < 4; i++) {
group = (u8)(i + FREQ_GROUP_5GHZ_BAND1);
_get_5ghz_ch_info(chdef5, group, &ch, &passive, &dfs,
&max_num, &ch_start);
_convert_ch5g((u8)(i + 1), rg, &plan->cnt, plan->ch,
ch, passive, dfs, max_num, ch_start);
}
}
PHL_INFO("[REGU], query specific channel plan for domain : 0x%x, total channels : %d !!\n",
domain, plan->cnt);
_display_chplan(plan);
return true;
}
/*
* @ Function description
* Query basic regulation info
*
* @ parameter
* phl : struct phl_info_t *
* info : struct rtw_regulation_info *, query result will be filled here
*
* @ return :
* true : regulation query successfully, caller can check result
* by input parameter *info.
* false : regulation query fail
*
*/
bool rtw_phl_query_regulation_info(void *phl, struct rtw_regulation_info *info)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
void *d = NULL;
if (!phl || !info)
return false;
if (!_regulation_valid(phl))
return false;
rg = &phl_info->regulation;
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
info->domain_code = (u8)rg->domain.code;
info->domain_reason = rg->domain.reason;
info->country[0] = rg->country[0];
info->country[1] = rg->country[1];
info->tpo = rg->tpo;
info->support_mode = rg->support_mode;
info->regulation_2g = rg->regulation_2g;
info->regulation_5g = rg->regulation_5g;
info->chplan_ver = REGULATION_CHPLAN_VERSION;
info->country_ver = REGULATION_COUNTRY_VERSION;
info->capability = rg->capability;
_os_spinunlock(d, &rg->lock, _bh, NULL);
return true;
}
/*
* @ Function description
 *	Use the country code to query the corresponding
* domain code and properties
*
* @ parameter
* country : 2 bytes char
 *	country_chplan : pointer to structure of chplan's info
 *
 * @ return :
 *	true : successfully searched the entry from cdmap
* false : country chplan query fail
*/
bool rtw_phl_query_country_chplan(char *country,
struct rtw_regulation_country_chplan* country_chplan)
{
u32 i = 0;
PHL_INFO("[REGU], query country code = \"%c%c\"\n",
country[0], country[1]);
for (i = 0; i < MAX_COUNTRY_NUM; i++) {
if (cdmap[i].char2[0] == country[0] &&
cdmap[i].char2[1] == country[1] ) {
country_chplan->domain_code = cdmap[i].domain_code;
if(cdmap[i].support & BIT(0))
country_chplan->support_mode |= (SUPPORT_11B | SUPPORT_11G | SUPPORT_11N);
if(cdmap[i].support & BIT(1))
country_chplan->support_mode |= (SUPPORT_11A);
if(cdmap[i].support & BIT(2))
country_chplan->support_mode |= (SUPPORT_11AC);
if(cdmap[i].support & BIT(3))
country_chplan->support_mode |= (SUPPORT_11AX);
country_chplan->tpo = cdmap[i].tpo;
return true;
}
}
return false;
}
bool rtw_phl_regulation_valid(void *phl)
{
return _regulation_valid(phl);
}
/*
* @ Function description
* Used to check if channel is in regulation channel list
*
* @ parameter
* phl : struct phl_info_t *
* channel : channel to be checked
* reject : enum ch_property, ex: (CH_PASSIVE | CH_DFS)
*
* @ return :
* true : channel is in regulation list and not rejected
* false : query regulation failed or channel is not in regulation
* channel list
*/
bool rtw_phl_regulation_valid_channel(void *phl, enum band_type band,
u16 channel, u8 reject)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
struct rtw_regulation_channel ch = {0};
bool valid = false;
void *d = NULL;
u8 rej_property = reject;
if (!_regulation_valid(phl))
return false;
rg = &phl_info->regulation;
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
if (_query_channel(rg, band, channel, &ch)) {
		if (!(rg->capability & CAPABILITY_DFS))
			rej_property |= CH_DFS; /* no DFS capability: reject DFS channels */
if (_valid_property(ch.property, rej_property))
valid = true;
}
_os_spinunlock(d, &rg->lock, _bh, NULL);
return valid;
}
/*
* @ Function description
* Used to check if channel is a regulation DFS channel
*
* @ parameter
* phl : struct phl_info_t *
* channel : channel to be checked
* dfs : result will be filled here
*
* @ return :
* true : regulation query successfully, caller can check result
* by input parameter *dfs.
* false : regulation fail
*
*/
bool rtw_phl_regulation_dfs_channel(void *phl, enum band_type band,
u16 channel, bool *dfs)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
struct rtw_regulation_channel ch = {0};
void *d = NULL;
bool query = false;
if (!_regulation_valid(phl) || !dfs)
return false;
rg = &phl_info->regulation;
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
if (_query_channel(rg, band, channel, &ch)) {
query = true;
if (ch.property & CH_DFS)
*dfs = true;
else
*dfs = false;
}
_os_spinunlock(d, &rg->lock, _bh, NULL);
return query;
}
/*
* @ Function description
* Query regulation channel
*
* @ parameter
* phl : struct phl_info_t *
* channel : channel for query
* ch : query result will be filled here
*
* @ return :
* true : regulation query successfully, caller can check result
* by input parameter *ch.
* false : regulation query fail
*
*/
bool rtw_phl_regulation_query_ch(void *phl, enum band_type band, u8 channel,
struct rtw_regulation_channel *ch)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
void *d = NULL;
bool query = false;
if (!_regulation_valid(phl) || !ch)
return false;
rg = &phl_info->regulation;
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
if (_query_channel(rg, band, channel, ch))
query = true;
_os_spinunlock(d, &rg->lock, _bh, NULL);
return query;
}
u8 rtw_phl_get_domain_regulation_2g(u8 domain)
{
u8 did = MAX_RD_MAP_NUM;
if (!rtw_phl_valid_regulation_domain(domain))
return REGULATION_MAX;
did = _domain_index(domain);
if (did >= MAX_RD_MAP_NUM)
return REGULATION_MAX;
return rdmap[did].freq_2g.regulation;
}
u8 rtw_phl_get_domain_regulation_5g(u8 domain)
{
u8 did = MAX_RD_MAP_NUM;
if (!rtw_phl_valid_regulation_domain(domain))
return REGULATION_MAX;
did = _domain_index(domain);
if (did >= MAX_RD_MAP_NUM)
return REGULATION_MAX;
return rdmap[did].freq_5g.regulation;
}
| 2301_81045437/rtl8852be | phl/phl_regulation.c | C | agpl-3.0 | 30,788 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_REGULATION_H_
#define _PHL_REGULATION_H_
#define MAX_CH_NUM_GROUP 24
enum rtw_regulation_freq_group {
FREQ_GROUP_2GHZ = 0x0,
FREQ_GROUP_5GHZ_BAND1,
FREQ_GROUP_5GHZ_BAND2,
FREQ_GROUP_5GHZ_BAND3,
FREQ_GROUP_5GHZ_BAND4,
FREQ_GROUP_6GHZ_UNII5,
FREQ_GROUP_6GHZ_UNII6,
FREQ_GROUP_6GHZ_UNII7,
FREQ_GROUP_6GHZ_UNII8,
FREQ_GROUP_6GHZ_PSC,
FREQ_GROUP_MAX
};
enum rtw_regulation_status {
REGULATION_SUCCESS = 0x0,
REGULATION_FAILURE,
REGULATION_DOMAIN_MISMATCH,
REGULATION_INVALID_2GHZ_RD,
REGULATION_INVALID_5GHZ_RD,
REGULATION_INVALID_DOMAIN
};
struct rtw_regulation_chplan_group {
u32 cnt;
struct rtw_regulation_channel ch[MAX_CH_NUM_GROUP];
};
#define MAX_HISTORY_NUM 20
#define INVALID_DOMAIN_CODE 0xffff
#define INVALID_CHDEF 0xff
struct rtw_domain {
u16 code;
u8 reason;
};
struct rtw_regulation {
_os_lock lock;
u8 init; /* regulation sw initialization */
u8 invalid_cnt;
u8 history_cnt;
struct rtw_domain history[MAX_HISTORY_NUM];
/* working regulation */
bool valid; /* true if domain code successfully set */
u16 capability; /* rtw_regulation_capability */
struct rtw_domain domain;
char country[2];
u8 tpo; /* tx power overwrite */
u8 support_mode;
u8 ch_idx2g; /* 2ghz chdef index */
u8 regulation_2g;
u8 ch_idx5g; /* 5ghz chdef index */
u8 regulation_5g;
struct rtw_regulation_chplan_group chplan[FREQ_GROUP_MAX];
/* 6 ghz */
u8 invalid_cnt_6g;
u8 history_cnt_6g;
struct rtw_domain history_6g[MAX_HISTORY_NUM];
bool valid_6g; /* true if domain code successfully set */
struct rtw_domain domain_6g;
u8 ch_idx6g; /* 6ghz chdef index */
u8 regulation_6g;
};
bool rtw_phl_regulation_query_ch(void *phl, enum band_type band, u8 channel,
struct rtw_regulation_channel *ch);
u8 rtw_phl_get_domain_regulation_2g(u8 domain);
u8 rtw_phl_get_domain_regulation_5g(u8 domain);
#endif /* _PHL_REGULATION_H_ */
| 2301_81045437/rtl8852be | phl/phl_regulation.h | C | agpl-3.0 | 2,527 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
#include "phl_chnlplan.h"
#include "phl_chnlplan_6g.h"
#include "phl_country.h"
extern const struct chdef_6ghz chdef6g[MAX_CHDEF_6GHZ];
extern const struct regulatory_domain_mapping_6g rdmap6[MAX_RD_MAP_NUM_6GHZ];
static void _get_6ghz_ch_info(const struct chdef_6ghz *chdef,
u8 group, u32 *ch, u32 *passive, u8 *max_num, u8 *ch_start)
{
switch (group) {
case FREQ_GROUP_6GHZ_UNII5:
*ch = ((chdef->support_ch_u5[2] << 16) |
(chdef->support_ch_u5[1] << 8) |
(chdef->support_ch_u5[0]));
*passive = ((chdef->passive_u5[2] << 16) |
(chdef->passive_u5[1] << 8) |
(chdef->passive_u5[0]));
*max_num = MAX_CH_NUM_UNII5;
*ch_start = 1;
break;
case FREQ_GROUP_6GHZ_UNII6:
*ch = chdef->support_ch_u6;
*passive = chdef->passive_u6;
*max_num = MAX_CH_NUM_UNII6;
*ch_start = 97;
break;
case FREQ_GROUP_6GHZ_UNII7:
*ch = ((chdef->support_ch_u7[2] << 16) |
(chdef->support_ch_u7[1] << 8) |
(chdef->support_ch_u7[0]));
*passive = ((chdef->passive_u7[2] << 16) |
(chdef->passive_u7[1] << 8) |
(chdef->passive_u7[0]));
*max_num = MAX_CH_NUM_UNII7;
*ch_start = 121;
break;
case FREQ_GROUP_6GHZ_UNII8:
*ch = ((chdef->support_ch_u8[1] << 8) |
(chdef->support_ch_u8[0]));
*passive = ((chdef->passive_u8[1] << 8) |
(chdef->passive_u8[0]));
*max_num = MAX_CH_NUM_UNII8;
*ch_start = 193;
break;
default:
*ch = 0;
*passive = 0;
*max_num = 0;
*ch_start = 0;
break;
}
}
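/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * the chdef_6ghz tables pack each UNII group's channel set as a little-endian
 * bitmap, with byte[0] supplying bits 0-7 of the mask assembled above. Bit i
 * then corresponds to channel (ch_start + 4 * i), which is exactly how
 * _convert_ch6g() below expands the mask. example_6g_bitmap_has_ch() is a
 * hypothetical helper that only demonstrates this mapping.
 */
#if 0 /* example only */
static bool example_6g_bitmap_has_ch(u32 mask, u8 ch_start, u8 channel)
{
	u8 i;

	if (channel < ch_start || ((channel - ch_start) % 4) != 0)
		return false;
	i = (u8)((channel - ch_start) / 4);
	return (mask & BIT(i)) ? true : false;
}
/* e.g. UNII-6 starts at channel 97, so bit 1 of support_ch_u6 maps to 101 */
#endif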
static void _convert_ch6g(u8 unii_6g, struct rtw_regulation *rg,
u32 *ch_cnt, struct rtw_regulation_channel *rch,
u32 ch, u32 passive, u8 max_num, u8 ch_start)
{
u16 i = 0;
u32 shift = 0;
u8 property = 0;
u32 cnt = 0;
PHL_INFO("[REGU], convert 6 ghz unii-%d channels, from %d, ch=0x%x, passive = 0x%x \n",
unii_6g, ch_start, ch, passive);
for (i = 0; i < max_num; i++) {
shift = (1 << i);
if (ch & shift) {
property = 0;
rch[*ch_cnt].band = BAND_ON_6G;
rch[*ch_cnt].channel = (u8)(ch_start + (i * 4));
if (passive & shift)
property |= CH_PASSIVE;
if ((rch[*ch_cnt].channel % 16) == 5)
property |= CH_PSC;
rch[*ch_cnt].property = property;
PHL_INFO("[REGU], ch: %d%s%s \n",
rch[*ch_cnt].channel,
((property & CH_PASSIVE) ? ", passive" : ""),
((property & CH_PSC) ? ", psc" : ""));
(*ch_cnt)++;
cnt++;
}
}
PHL_INFO("[REGU], converted channels : %d\n", cnt);
}
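/*
 * Illustrative note (added for clarity): 6 GHz preferred scanning channels
 * are every 16th channel number starting at 5 (5, 21, 37, ..., 229), which is
 * the (channel % 16 == 5) test used by _convert_ch6g() above when it sets
 * CH_PSC. example_is_psc_6g() is a hypothetical helper restating that rule.
 */
#if 0 /* example only */
static bool example_is_psc_6g(u8 channel)
{
	return (channel % 16) == 5;
}
#endif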
static void _update_psc_group(struct rtw_regulation *rg)
{
u8 group = FREQ_GROUP_6GHZ_UNII5;
struct rtw_regulation_chplan_group *plan =
&rg->chplan[FREQ_GROUP_6GHZ_PSC];
struct rtw_regulation_chplan_group *src = NULL;
u32 i = 0, j = 0;
plan->cnt = 0;
for (i = 0; i < 4; i++) {
group = (u8)(i + FREQ_GROUP_6GHZ_UNII5);
src = &rg->chplan[group];
for (j = 0; j < src->cnt; j++) {
if (src->ch[j].property & CH_PSC) {
plan->ch[plan->cnt].band =
src->ch[j].band;
plan->ch[plan->cnt].channel =
src->ch[j].channel;
plan->ch[plan->cnt].property =
src->ch[j].property;
plan->cnt++;
}
}
}
}
static bool _chnlplan_update_6g(struct rtw_regulation *rg,
u8 regulation, u8 ch_idx)
{
const struct chdef_6ghz *chdef = NULL;
struct rtw_regulation_chplan_group *plan = NULL;
u8 group = FREQ_GROUP_6GHZ_UNII5;
u8 max_num = 0, ch_start = 0;
u16 i = 0;
u32 ch = 0, passive = 0;
u32 total = 0;
if (regulation >= REGULATION_MAX)
return false;
for (i = 0; i < MAX_CHDEF_6GHZ; i++) {
if (ch_idx == chdef6g[i].idx) {
chdef = &chdef6g[i];
break;
}
}
if (!chdef)
return false;
rg->ch_idx6g = ch_idx;
rg->regulation_6g = regulation;
for (i = 0; i < 4; i++) {
group = (u8)(i + FREQ_GROUP_6GHZ_UNII5);
plan = &rg->chplan[group];
plan->cnt = 0;
_get_6ghz_ch_info(chdef, group,
&ch, &passive, &max_num, &ch_start);
_convert_ch6g((u8)(i + 5), rg, &plan->cnt, plan->ch,
ch, passive, max_num, ch_start);
total += plan->cnt;
}
_update_psc_group(rg);
PHL_INFO("[REGU], 6 GHz, total channel = %d\n", total);
return true;
}
static u8 _domain_index_6g(u16 domain)
{
u8 i = 0;
for (i = 0; i < MAX_RD_MAP_NUM_6GHZ; i++) {
if (domain == rdmap6[i].domain_code) {
return i;
}
}
return MAX_RD_MAP_NUM_6GHZ;
}
static bool _regulatory_domain_update_6g(struct rtw_regulation *rg,
u16 domain, enum regulation_rsn reason)
{
u8 regulation = REGULATION_NA;
u8 ch_idx = 0, did = 0;
rg->domain_6g.reason = reason;
if (domain == RSVD_DOMAIN) {
rg->domain_6g.code = RSVD_DOMAIN;
return true;
} else {
/* Note: only valid domain index can reach here */
did = _domain_index_6g(domain);
rg->domain_6g.code = rdmap6[did].domain_code;
regulation = rdmap6[did].regulation;
ch_idx = rdmap6[did].ch_idx;
return _chnlplan_update_6g(rg, regulation, ch_idx);
}
}
static void _get_group_chplan_6g(struct rtw_regulation *rg,
struct rtw_regulation_chplan_group *group,
struct rtw_regulation_chplan *plan)
{
u32 i = 0;
for (i = 0; i < group->cnt; i++) {
if (group->ch[i].channel) {
plan->ch[plan->cnt].band =
group->ch[i].band;
plan->ch[plan->cnt].channel =
group->ch[i].channel;
plan->ch[plan->cnt].property =
group->ch[i].property;
plan->cnt++;
}
}
}
static void _history_log_6g(struct rtw_regulation *rg, u16 domain, u8 reason)
{
rg->history_6g[rg->history_cnt_6g].code = domain;
rg->history_6g[rg->history_cnt_6g].reason = reason;
rg->history_cnt_6g++;
if (rg->history_cnt_6g >= MAX_HISTORY_NUM)
rg->history_cnt_6g = 0;
}
void regu_get_chnlplan_6g(struct rtw_regulation *rg,
enum rtw_regulation_query type,
struct rtw_regulation_chplan *plan)
{
struct rtw_regulation_chplan_group *group = NULL;
/* 6ghz */
if (rg->capability & CAPABILITY_6GHZ) {
/* unii5 */
if (type == REGULQ_CHPLAN_6GHZ ||
type == REGULQ_CHPLAN_6GHZ_UNII5) {
group = &rg->chplan[FREQ_GROUP_6GHZ_UNII5];
_get_group_chplan_6g(rg, group, plan);
}
/* unii6 */
if (type == REGULQ_CHPLAN_6GHZ ||
type == REGULQ_CHPLAN_6GHZ_UNII6) {
group = &rg->chplan[FREQ_GROUP_6GHZ_UNII6];
_get_group_chplan_6g(rg, group, plan);
}
/* unii7 */
if (type == REGULQ_CHPLAN_6GHZ ||
type == REGULQ_CHPLAN_6GHZ_UNII7) {
group = &rg->chplan[FREQ_GROUP_6GHZ_UNII7];
_get_group_chplan_6g(rg, group, plan);
}
/* unii8 */
if (type == REGULQ_CHPLAN_6GHZ ||
type == REGULQ_CHPLAN_6GHZ_UNII8) {
group = &rg->chplan[FREQ_GROUP_6GHZ_UNII8];
_get_group_chplan_6g(rg, group, plan);
}
/* psc */
if (type == REGULQ_CHPLAN_FULL ||
type == REGULQ_CHPLAN_6GHZ_PSC) {
group = &rg->chplan[FREQ_GROUP_6GHZ_PSC];
_get_group_chplan_6g(rg, group, plan);
}
}
}
bool regu_valid_domain_6g(u16 domain)
{
if (domain == RSVD_DOMAIN)
return true;
if (_domain_index_6g(domain) >= MAX_RD_MAP_NUM_6GHZ)
return false;
return true;
}
bool regu_set_domain_6g(void *phl, u16 domain,
enum regulation_rsn reason)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_regulation *rg = NULL;
void *d = NULL;
PHL_INFO("[REGU], set 6 ghz domain code = 0x%x, reason = 0x%x\n",
domain, reason);
if (!phl_info)
return false;
rg = &phl_info->regulation;
if (!rg->init)
return false;
if (!regu_valid_domain_6g(domain))
return false;
d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &rg->lock, _bh, NULL);
_history_log_6g(rg, domain, reason);
if (_regulatory_domain_update_6g(rg, domain, reason))
rg->valid_6g = true;
else {
rg->valid_6g = false;
rg->invalid_cnt_6g++;
}
_os_spinunlock(d, &rg->lock, _bh, NULL);
PHL_INFO("[REGU], 6 ghz domain code valid = %d\n", rg->valid_6g);
return rg->valid_6g;
}
| 2301_81045437/rtl8852be | phl/phl_regulation_6g.c | C | agpl-3.0 | 8,236 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_REGULATION_6G_H_
#define _PHL_REGULATION_6G_H_
bool regu_set_domain_6g(void *phl, u16 domain, enum regulation_rsn reason);
void regu_get_chnlplan_6g(struct rtw_regulation *rg,
enum rtw_regulation_query type,
struct rtw_regulation_chplan *plan);
#endif /* _PHL_REGULATION_6G_H_ */
| 2301_81045437/rtl8852be | phl/phl_regulation_6g.h | C | agpl-3.0 | 952 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_REGULATION_DEF_H_
#define _PHL_REGULATION_DEF_H_
#define RSVD_DOMAIN 0x1a
#define MAX_CH_NUM_2GHZ 14
#define MAX_CH_NUM_BAND1 4 /* 36, 40, 44, 48 */
#define MAX_CH_NUM_BAND2 4 /* 52, 56, 60, 64 */
#define MAX_CH_NUM_BAND3 12 /* 100, 104, 108, 112,
116, 120, 124, 128,
132, 136, 140, 144 */
#define MAX_CH_NUM_BAND4 8 /* 149, 153, 157, 161, 165, 169, 173, 177 */
#define MAX_CH_NUM_5GHZ (MAX_CH_NUM_BAND1 + MAX_CH_NUM_BAND2 +\
MAX_CH_NUM_BAND3 + MAX_CH_NUM_BAND4)
#define MAX_CH_NUM_UNII5 24 /* 1 ~ 93 */
#define MAX_CH_NUM_UNII6 6 /* 97 ~ 117 */
#define MAX_CH_NUM_UNII7 18 /* 121 ~ 189 */
#define MAX_CH_NUM_UNII8 12 /* 193 ~ 237 */
#define MAX_CH_NUM_6GHZ (MAX_CH_NUM_UNII5 + MAX_CH_NUM_UNII6 +\
MAX_CH_NUM_UNII7 + MAX_CH_NUM_UNII8)
#define BAND_2GHZ(_band_) ((_band_ == BAND_ON_24G) ? true : false)
#define BAND_5GHZ(_band_) ((_band_ == BAND_ON_5G) ? true : false)
#define BAND_6GHZ(_band_) ((_band_ == BAND_ON_6G) ? true : false)
#define CH_5GHZ_BAND1(_ch_) (((_ch_ >= 36) && (_ch_ <= 48)) ? true : false)
#define CH_5GHZ_BAND2(_ch_) (((_ch_ >= 52) && (_ch_ <= 64)) ? true : false)
#define CH_5GHZ_BAND3(_ch_) (((_ch_ >= 100) && (_ch_ <= 144)) ? true : false)
#define CH_5GHZ_BAND4(_ch_) (((_ch_ >= 149) && (_ch_ <= 177)) ? true : false)
#define SUPPORT_11A BIT(0)
#define SUPPORT_11B BIT(1)
#define SUPPORT_11G BIT(2)
#define SUPPORT_11N BIT(3)
#define SUPPORT_11AC BIT(4)
#define SUPPORT_11AX BIT(5)
enum regulation_rsn {
REGU_RSN_DEFAULT = 0x0,
REGU_RSN_SMBIOS,
REGU_RSN_EFUSE,
REGU_RSN_11D,
REGU_RSN_REGISTRY,
REGU_RSN_LOCATION,
REGU_RSN_MANUAL,
REGU_RSN_MAX
};
enum rtw_regulation_capability {
CAPABILITY_2GHZ = BIT(0),
CAPABILITY_5GHZ = BIT(1),
CAPABILITY_DFS = BIT(2),
CAPABILITY_6GHZ = BIT(3)
};
enum rtw_regulation_query {
REGULQ_CHPLAN_FULL = 0x0,
REGULQ_CHPLAN_2GHZ,
REGULQ_CHPLAN_5GHZ_ALL,
REGULQ_CHPLAN_5GHZ_BAND1,
REGULQ_CHPLAN_5GHZ_BAND2,
REGULQ_CHPLAN_5GHZ_BAND3,
REGULQ_CHPLAN_5GHZ_BAND4,
REGULQ_CHPLAN_6GHZ_UNII5,
REGULQ_CHPLAN_6GHZ_UNII6,
REGULQ_CHPLAN_6GHZ_UNII7,
REGULQ_CHPLAN_6GHZ_UNII8,
REGULQ_CHPLAN_6GHZ,
REGULQ_CHPLAN_6GHZ_PSC,
REGULQ_CHPLAN_2GHZ_5GHZ,
};
enum ch_property {
CH_PASSIVE = BIT(0), /* regulatory passive channel */
CH_DFS = BIT(1), /* 5 ghz DFS channel */
CH_PSC = BIT(2) /* 6 ghz preferred scanning channel */
};
struct rtw_regulation_channel {
enum band_type band;
u8 channel;
u8 property;
};
struct rtw_regulation_chplan {
u32 cnt;
struct rtw_regulation_channel ch[MAX_CH_NUM_2GHZ +
MAX_CH_NUM_5GHZ +
MAX_CH_NUM_6GHZ];
};
struct rtw_ch {
enum band_type band;
u8 ch;
};
struct rtw_chlist {
u32 cnt;
struct rtw_ch ch[MAX_CH_NUM_2GHZ +
MAX_CH_NUM_5GHZ +
MAX_CH_NUM_6GHZ];
};
struct rtw_regulation_info {
u8 domain_code;
u8 domain_reason;
u8 domain_code_6g;
u8 domain_reason_6g;
char country[2];
u8 support_mode;
u8 regulation_2g;
u8 regulation_5g;
u8 regulation_6g;
u8 tpo;
u8 chplan_ver;
u8 country_ver;
u16 capability;
};
struct rtw_regulation_country_chplan {
u8 domain_code;
u8 support_mode;
/*
* bit0: accept 11a
* bit1: accept 11b
* bit2: accept 11g
* bit3: accept 11n
* bit4: accept 11ac
* bit5: accept 11ax
*/
u8 tpo; /* tx power overwrite */
};
struct rtw_user_def_chplan {
u16 ch2g; /* bit0 ~ bit13 : ch1~ch14 */
u16 passive2g; /* bit0 ~ bit13 : ch1~ch14, if value = 1, means passive for that channel */
/* 5g channels.
* bit0~7 : ch 36/40/44/48/52/56/60/64
* bit8~15 : ch 100/104/108/112/116/120/124/128
* bit16~23 : ch 132/136/140/144/149/153/157/161
* bit24~27 : ch 165/169/173/177
*/
u32 ch5g;
u32 passive5g;
u32 dfs5g;
u32 regulatory_idx;
u8 tpo;
};
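/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * with the bit layout documented above, enabling 2 GHz channel N sets bit
 * (N - 1) of ch2g, and ch5g follows the listed 5 GHz channel order.
 * example_user_def_chplan() is hypothetical and only shows how a caller
 * might fill the structure.
 */
#if 0 /* example only */
static void example_user_def_chplan(struct rtw_user_def_chplan *udef)
{
	udef->ch2g = (u16)(BIT(0) | BIT(5) | BIT(10)); /* ch 1, 6, 11 */
	udef->passive2g = 0; /* all of them active scan */
	udef->ch5g = BIT(0) | BIT(1) | BIT(2) | BIT(3); /* ch 36/40/44/48 */
	udef->passive5g = 0;
	udef->dfs5g = 0; /* band 1 channels are non-DFS */
}
#endif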
/*
* NOTE:
* This api prototype will be removed after hal related API/header is added
* for halrf.
*/
bool rtw_phl_query_regulation_info(void *phl, struct rtw_regulation_info *info);
#endif /* _PHL_REGULATION_DEF_H_ */
| 2301_81045437/rtl8852be | phl/phl_regulation_def.h | C | agpl-3.0 | 4,530 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_ROLE_C_
#include "phl_headers.h"
static enum rtw_phl_status
_phl_alloc_hw_resource(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, u8 hw_band)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
void *drv = phl_to_drvpriv(phl);
struct rtw_phl_com_t *phl_com = phl->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_com);
struct hw_band_ctl_t *band_ctrl;
u8 port_num, max_port_num;
u8 wmm_num, max_wmm_num;
int ridx;
band_ctrl = &mr_ctl->band_ctrl[hw_band];
max_port_num = hal_spec->port_num;
max_wmm_num = hal_spec->wmm_num;
ridx = wrole->id;
_os_spinlock(drv, &band_ctrl->lock, _ps, NULL);
/*alloc hw_port*/
for (port_num = 0; port_num < max_port_num; port_num++) {
if (!(band_ctrl->port_map & BIT(port_num))) {
band_ctrl->port_map |= BIT(port_num);
break;
}
}
if (port_num == max_port_num) {
PHL_ERR("%s Can't allocate hw port\n", __func__);
_os_warn_on(1);
goto _exit;
}
wrole->hw_port = port_num;
/*alloc hw_wmm*/
for (wmm_num = 0; wmm_num < max_wmm_num; wmm_num++) {
if (!(band_ctrl->wmm_map & BIT(wmm_num))) {
band_ctrl->wmm_map |= BIT(wmm_num);
break;
}
}
if (wmm_num == max_wmm_num) {
PHL_INFO("%s WID:%d - assigne wmm_id to 0\n", __func__, ridx);
wrole->hw_wmm = 0;
} else {
wrole->hw_wmm = wmm_num;
}
#ifdef RTW_WKARD_HW_WMM_ALLOCATE
if (wrole->hw_wmm == 0)
_os_atomic_inc(phl_to_drvpriv(phl), &wrole->hw_wmm0_ref_cnt);
#endif
/*set mr_role_map*/
band_ctrl->role_map |= BIT(ridx);
wrole->hw_band = hw_band;
psts = RTW_PHL_STATUS_SUCCESS;
_exit:
_os_spinunlock(drv, &band_ctrl->lock, _ps, NULL);
if (psts == RTW_PHL_STATUS_SUCCESS) {
PHL_INFO("%s wridx:%d success\n", __func__, ridx);
PHL_DUMP_ROLE_EX(phl, wrole);
}
return psts;
}
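/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * the hw_port and hw_wmm allocation above is a first-fit scan over a bitmap.
 * example_alloc_bitmap_slot() is a hypothetical helper showing the same
 * pattern in isolation; a return value equal to max mirrors the
 * "port_num == max_port_num" exhaustion check.
 */
#if 0 /* example only */
static u8 example_alloc_bitmap_slot(u8 *map, u8 max)
{
	u8 i;

	for (i = 0; i < max; i++) {
		if (!(*map & BIT(i))) {
			*map |= BIT(i);
			break;
		}
	}
	return i; /* i == max means no free slot */
}
#endif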
static enum rtw_phl_status
_phl_free_hw_resource(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
void *drv = phl_to_drvpriv(phl);
struct rtw_phl_com_t *phl_com = phl->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct hw_band_ctl_t *band_ctrl;
int ridx = wrole->id;
u8 hw_band = wrole->hw_band;
band_ctrl = &mr_ctl->band_ctrl[hw_band];
PHL_DUMP_ROLE_EX(phl, wrole);
_os_spinlock(drv, &band_ctrl->lock, _ps, NULL);
/*release hw_port*/
band_ctrl->port_map &= ~BIT(wrole->hw_port);
/*release hw_wmm*/
#ifdef RTW_WKARD_HW_WMM_ALLOCATE
if (wrole->hw_wmm == 0) {
_os_atomic_dec(phl_to_drvpriv(phl), &wrole->hw_wmm0_ref_cnt);
if (_os_atomic_read(phl_to_drvpriv(phl), &wrole->hw_wmm0_ref_cnt) == 0)
band_ctrl->wmm_map &= ~BIT(wrole->hw_wmm);
} else
#endif
{
band_ctrl->wmm_map &= ~BIT(wrole->hw_wmm);
}
/*release mr_role_map*/
band_ctrl->role_map &= ~BIT(ridx);
_os_spinunlock(drv, &band_ctrl->lock, _ps, NULL);
psts = RTW_PHL_STATUS_SUCCESS;
PHL_INFO("%s wr-id:%d success\n", __func__, ridx);
return psts;
}
#if 0 /*TODO:DBCC*/
static enum rtw_phl_status
_phl_realloc_hw_resource(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, u8 new_band)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
if (new_band == wrole->hw_band) {
PHL_ERR("%s widx:%d at the same band(%d)\n", __func__, wrole->id, new_band);
return psts;
}
_phl_free_hw_resource(phl, wrole);
_phl_alloc_hw_resource(phl, wrole, new_band);
return psts;
}
#endif
static u8 _phl_search_roleidx_by_addr(
struct phl_info_t *phl_info, u8 *mac_addr)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
u8 ridx = MAX_WIFI_ROLE_NUMBER;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
if (mr_ctl->role_map & BIT(ridx)) {
if (_os_mem_cmp(phl_to_drvpriv(phl_info),
phl_com->wifi_roles[ridx].mac_addr,
mac_addr, MAC_ALEN) == 0)
break;
}
}
#if 0
if (ridx == MAX_WIFI_ROLE_NUMBER)
PHL_INFO("%s cannot get rid\n", __func__);
#endif
return ridx;
}
struct rtw_wifi_role_t *
phl_get_wrole_by_addr(struct phl_info_t *phl_info, u8 *mac_addr)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
int ridx = MAX_WIFI_ROLE_NUMBER;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct rtw_wifi_role_t *wrole = NULL;
bool found = false;
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
if (mr_ctl->role_map & BIT(ridx)) {
wrole = &(phl_com->wifi_roles[ridx]);
if (_os_mem_cmp(phl_to_drvpriv(phl_info),
wrole->mac_addr, mac_addr, MAC_ALEN) == 0) {
found = true;
break;
}
}
}
if (found == false)
wrole = NULL;
return wrole;
}
struct rtw_wifi_role_t *
phl_get_wrole_by_ridx(struct phl_info_t *phl_info, u8 role_idx)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
if (role_idx < MAX_WIFI_ROLE_NUMBER)
return &(phl_com->wifi_roles[role_idx]);
return NULL;
}
static void
_phl_role_notify_buf_done(void* priv, struct phl_msg* msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
if(msg->inbuf && msg->inlen){
_os_mem_free(phl_to_drvpriv(phl_info), msg->inbuf, msg->inlen);
}
}
#ifdef CONFIG_CMD_DISP
static void
_phl_role_cmd_done(void* priv, struct phl_msg* msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
if(msg->inbuf && msg->inlen){
_os_kmem_free(phl_to_drvpriv(phl_info),
msg->inbuf, msg->inlen);
}
}
static void
_phl_send_role_notify_cmd(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole, enum role_state rstate)
{
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
struct rtw_role_cmd *rcmd = NULL;
rcmd = (struct rtw_role_cmd *)_os_kmem_alloc(
phl_to_drvpriv(phl_info), sizeof(struct rtw_role_cmd));
if (rcmd == NULL) {
PHL_ERR("%s: alloc role cmd fail.\n", __func__);
return;
}
rcmd->wrole = wrole;
rcmd->rstate = rstate;
msg.inbuf = (u8 *)rcmd;
msg.inlen = sizeof(struct rtw_role_cmd);
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_MRC);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_ROLE_NTFY);
msg.band_idx = HW_BAND_0;
attr.completion.completion = _phl_role_cmd_done;
attr.completion.priv = phl_info;
if (phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL) !=
RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s: dispr_send_msg failed !\n", __func__);
goto cmd_fail;
}
return;
cmd_fail:
_os_kmem_free(phl_to_drvpriv(phl_info), rcmd,
sizeof(struct rtw_role_cmd));
}
#endif
static enum rtw_phl_status
_phl_role_notify(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole,
enum role_state rstate)
{
if (rstate >= PHL_ROLE_STATE_UNKNOWN) {
PHL_ERR("%s unknow rstate:%d\n", __func__, rstate);
return RTW_PHL_STATUS_FAILURE;
}
#ifdef CONFIG_PHL_CMD_BTC
_phl_send_role_notify_cmd(phl_info, wrole, rstate);
#else
#ifdef CONFIG_BTCOEX
rtw_phl_btc_role_notify(phl_info, wrole->id, rstate);
#endif
#endif
return RTW_PHL_STATUS_SUCCESS;
}
static struct rtw_wifi_role_t *
_phl_wifi_role_alloc_sw(struct phl_info_t *phl_info,
u8 *mac_addr, enum role_type type, u8 ridx)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_wifi_role_t *role = NULL;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
u8 role_idx = INVALID_WIFI_ROLE_IDX;
u8 hw_band;
_os_spinlock(phl_to_drvpriv(phl_info), &mr_ctl->lock, _ps, NULL);
if (ridx == UNSPECIFIED_ROLE_ID) {
/* search available role id */
for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
if (!(mr_ctl->role_map & BIT(role_idx))) {
mr_ctl->role_map |= BIT(role_idx);
break;
}
}
} else {
if (mr_ctl->role_map & BIT(ridx)) {
PHL_ERR("role_idx:%d has used\n", ridx);
_os_warn_on(1);
} else {
mr_ctl->role_map |= BIT(ridx);
role_idx = ridx;
}
}
_os_spinunlock(phl_to_drvpriv(phl_info), &mr_ctl->lock, _ps, NULL);
if (role_idx == INVALID_WIFI_ROLE_IDX) {
PHL_ERR("%s Can't get available role idx\n", __func__);
_os_warn_on(1);
return role;
}
role = &phl_com->wifi_roles[role_idx];
role->type = type;
role->mstate = MLME_NO_LINK;
role->chanctx = NULL;
hw_band = 0;
/*alloc hw_band and hw_port,hw_wmm*/
if (_phl_alloc_hw_resource(phl_info, role, hw_band) != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s alloc_hw resource failed\n", __func__);
_os_warn_on(1);
role = NULL;
return role;
}
#ifdef RTW_WKARD_STA_BCN_INTERVAL
#ifdef RTW_PHL_BCN
if (role->type == PHL_RTYPE_AP) {
role->bcn_cmn.bcn_interval = 100;
role->mstate = MLME_LINKED;
}
#endif
#endif
_os_mem_cpy(phl_to_drvpriv(phl_info),
role->mac_addr, mac_addr, MAC_ALEN);
phl_init_role_cap(phl_info, role);
phl_init_protocol_cap(phl_info, role);
return role;
}
static enum rtw_phl_status
_phl_wifi_role_start(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_stainfo_t *sta)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hsts = RTW_HAL_STATUS_FAILURE;
if (sta->active == false) {
psts = phl_alloc_stainfo_hw(phl_info, sta);
if (psts != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s can't alloc self stainfo_hw\n", __func__);
_os_warn_on(1);
goto _exit;
}
PHL_INFO("%s sta_info hw - macid:%u %02x:%02x:%02x:%02x:%02x:%02x\n",
__func__, sta->macid,
sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5]);
}
hsts = rtw_hal_role_cfg(phl_info->hal, wrole);
if(hsts != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("%s hal role cfg failed\n", __func__);
_os_warn_on(1);
goto _exit;
}
wrole->active = true;
_phl_role_notify(phl_info, wrole, PHL_ROLE_START);
psts = RTW_PHL_STATUS_SUCCESS;
return psts;
_exit:
return psts;
}
static enum rtw_phl_status
_phl_wifi_role_free_sw(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
_phl_free_hw_resource(phl_info, wrole);
_os_mem_set(phl_to_drvpriv(phl_info), wrole->mac_addr, 0, MAC_ALEN);
_os_spinlock(phl_to_drvpriv(phl_info), &mr_ctl->lock, _ps, NULL);
mr_ctl->role_map &= ~BIT(wrole->id);
_os_spinunlock(phl_to_drvpriv(phl_info), &mr_ctl->lock, _ps, NULL);
wrole->active = false;
return RTW_PHL_STATUS_SUCCESS;
}
#ifdef CONFIG_CMD_DISP
struct wr_start_param {
struct rtw_wifi_role_t *wrole;
struct rtw_phl_stainfo_t *sta;
};
enum rtw_phl_status
phl_wifi_role_start_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct wr_start_param *cmd_wr = (struct wr_start_param *)param;
return _phl_wifi_role_start(phl_info, cmd_wr->wrole, cmd_wr->sta);
}
void phl_wifi_role_start_done(void *drv_priv, u8 *cmd, u32 cmd_len,
enum rtw_phl_status status)
{
if (cmd) {
_os_kmem_free(drv_priv, cmd, cmd_len);
cmd = NULL;
PHL_INFO("%s.....\n", __func__);
}
}
enum rtw_phl_status
phl_wifi_role_start(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_stainfo_t *sta)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct wr_start_param *wr_start = NULL;
void *drv = phl_to_drvpriv(phl_info);
u32 wr_start_len;
wr_start_len = sizeof(struct wr_start_param);
wr_start = _os_kmem_alloc(drv, wr_start_len);
if (wr_start == NULL) {
PHL_ERR("%s: alloc wr_start_param failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto _exit;
}
wr_start->wrole = wrole;
wr_start->sta = sta;
psts = phl_cmd_enqueue(phl_info,
wrole->hw_band,
MSG_EVT_ROLE_START,
(u8*)wr_start,
wr_start_len,
phl_wifi_role_start_done,
PHL_CMD_WAIT,
0);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
PHL_INFO("%s wr-id:%d failure\n", __func__, wrole->id);
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
_os_kmem_free(drv, wr_start, wr_start_len);
psts = RTW_PHL_STATUS_FAILURE;
PHL_INFO("%s wr-id:%d failure\n", __func__, wrole->id);
} else {
PHL_INFO("%s wr-id:%d success\n", __func__, wrole->id);
}
_exit:
return psts;
}
#else
enum rtw_phl_status
phl_wifi_role_start(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_stainfo_t *sta)
{
return _phl_wifi_role_start(phl_info, wrole, sta);
}
#endif
u8 rtw_phl_wifi_role_alloc(void *phl, u8 *mac_addr, enum role_type type,
u8 ridx, struct rtw_wifi_role_t **wifi_role, bool ignore_hw_fail)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
u8 role_idx = INVALID_WIFI_ROLE_IDX;
struct rtw_wifi_role_t *role = NULL;
struct rtw_phl_stainfo_t *phl_sta = NULL;
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
if (mac_addr == NULL) {
PHL_ERR("%s mac_addr == NULL\n", __func__);
goto _exit;
}
/*search rid by mac_addr, if had assigned then return role index*/
role_idx = _phl_search_roleidx_by_addr(phl_info, mac_addr);
if (role_idx != INVALID_WIFI_ROLE_IDX) {
PHL_INFO("%s wifi-role(%d) had allociated\n", __func__, role_idx);
*wifi_role = &phl_com->wifi_roles[role_idx];
goto _exit;
}
role = _phl_wifi_role_alloc_sw(phl_info, mac_addr, type, ridx);
if (role == NULL) {
PHL_ERR("%s role alloc sw failed\n", __func__);
_os_warn_on(1);
goto _exit;
}
/*alloc sta_info for self*/
phl_sta = phl_alloc_stainfo_sw(phl_info, role->mac_addr, role);
if (phl_sta == NULL) {
PHL_ERR("%s can't alloc self stainfo_sw\n", __func__);
_os_warn_on(1);
goto _err_stainfo_sw;
}
psts = phl_wifi_role_start(phl_info, role, phl_sta);
if (psts != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s role start failed\n", __func__);
if (!ignore_hw_fail) {
_os_warn_on(1);
goto _err_role_start;
}
}
*wifi_role = role;
role_idx = role->id;
PHL_DUMP_MR_EX(phl_info);
return role_idx;
_err_role_start:
phl_wifi_role_free_stainfo_sw(phl_info, role);
_err_stainfo_sw:
_phl_wifi_role_free_sw(phl_info, role);
_exit:
return role_idx;
}
enum rtw_phl_status
phl_role_noa_notify(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole)
{
_phl_role_notify(phl_info, wrole, PHL_ROLE_UPDATE_NOA);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_role_ap_client_notify(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole, u8 link_sts)
{
enum role_state rstate;
if (link_sts == MLME_LINKING)
rstate = PHL_ROLE_MSTS_STA_CONN_START;
else if (link_sts == MLME_LINKED)
rstate = PHL_ROLE_MSTS_STA_CONN_END;
else
rstate = PHL_ROLE_MSTS_STA_DIS_CONN;
_phl_role_notify(phl_info, wrole, rstate);
#ifdef CONFIG_MCC_SUPPORT
phl_mcc_client_link_notify_for_ap(phl_info, wrole, rstate);
#endif /* CONFIG_MCC_SUPPORT */
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_role_notify(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole)
{
enum role_state rstate = PHL_ROLE_STATE_UNKNOWN;
switch (wrole->type) {
case PHL_RTYPE_STATION:
case PHL_RTYPE_P2P_GC:
{
if (wrole->mstate == MLME_LINKING)
rstate = PHL_ROLE_MSTS_STA_CONN_START;
else if (wrole->mstate == MLME_LINKED)
rstate = PHL_ROLE_MSTS_STA_CONN_END;
else
rstate = PHL_ROLE_MSTS_STA_DIS_CONN;
}
break;
case PHL_RTYPE_AP:
case PHL_RTYPE_VAP:
case PHL_RTYPE_MESH:
case PHL_RTYPE_P2P_GO:
{
rstate = (wrole->mstate == MLME_NO_LINK)
? PHL_ROLE_MSTS_AP_STOP
: PHL_ROLE_MSTS_AP_START;
}
break;
case PHL_RTYPE_NONE:
case PHL_RTYPE_ADHOC:
case PHL_RTYPE_ADHOC_MASTER:
case PHL_RTYPE_MONITOR:
case PHL_RTYPE_P2P_DEVICE:
case PHL_RTYPE_NAN:
case PHL_MLME_MAX:
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_,
"%s: Unsupported case:%d in wrole notify, please check it\n",
__func__, wrole->type);
break;
default:
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_,
"%s role-type(%d) not support\n",
__func__, wrole->type);
break;
}
_phl_role_notify(phl_info, wrole, rstate);
return RTW_PHL_STATUS_SUCCESS;
}
/**
* This function is called once wifi info changed
* (see enum wr_chg_id)
* @phl: see phl_info_t
* @wrole: information is updated for this wifi role
* @chg_id: see enum wr_chg_id
* @chg_info: the change info to be update
*/
enum rtw_phl_status
phl_wifi_role_change(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole,
enum wr_chg_id chg_id, void *chg_info)
{
enum rtw_phl_status pstate = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstate;
struct rtw_phl_stainfo_t *sta = NULL;
enum phl_upd_mode mode = PHL_UPD_ROLE_MAX;
void *drv = phl_to_drvpriv(phl_info);
switch (chg_id) {
case WR_CHG_TYPE:
{
enum role_type type = *(enum role_type *)chg_info;
if (wrole->type == type) {
PHL_WARN("wrole type(%d) not change\n", wrole->type);
pstate = RTW_PHL_STATUS_SUCCESS;
return pstate;
}
PHL_INFO("wrole type(%d) change to type(%d)\n",
wrole->type, type);
sta = rtw_phl_get_stainfo_self(phl_info, wrole);
if (sta) {
if (type == PHL_RTYPE_TDLS || wrole->type == PHL_RTYPE_TDLS) {
wrole->type = type;
pstate = RTW_PHL_STATUS_SUCCESS;
}
else {
wrole->type = type;
wrole->mstate = MLME_NO_LINK;
#ifdef RTW_WKARD_STA_BCN_INTERVAL
#ifdef RTW_PHL_BCN
if (wrole->type == PHL_RTYPE_AP) {
wrole->bcn_cmn.bcn_interval = 100;
wrole->mstate = MLME_LINKED;
wrole->hiq_win = 16; /* unit: ms */
}
#endif
#endif
phl_init_role_cap(phl_info, wrole);
phl_init_protocol_cap(phl_info, wrole);
rtw_hal_role_cfg(phl_info->hal, wrole);
mode = PHL_UPD_ROLE_TYPE_CHANGE;
_os_mem_cpy(drv, sta->mac_addr, wrole->mac_addr, MAC_ALEN);
pstate = phl_change_stainfo(phl_info, sta, mode);
/*rtw_hal_change_sta_entry(phl_info->hal, sta);*/
_phl_role_notify(phl_info, wrole, PHL_ROLE_CHG_TYPE);
}
}
else {
PHL_ERR("cannot get stainfo_self\n");
}
PHL_DUMP_MR_EX(phl_info);
}
break;
case WR_CHG_MADDR:
{
u8 *maddr = (u8 *)chg_info;
if(_os_mem_cmp(drv, wrole->mac_addr, maddr, MAC_ALEN) == 0) {
PHL_WARN("wrole maddr %02x:%02x:%02x:%02x:%02x:%02x not change\n",
maddr[0], maddr[1], maddr[2],
maddr[3], maddr[4], maddr[5]);
return RTW_PHL_STATUS_SUCCESS;
}
PHL_INFO("wrole maddr %02x:%02x:%02x:%02x:%02x:%02x change to %02x:%02x:%02x:%02x:%02x:%02x\n",
wrole->mac_addr[0], wrole->mac_addr[1],
wrole->mac_addr[2], wrole->mac_addr[3],
wrole->mac_addr[4], wrole->mac_addr[5],
maddr[0], maddr[1], maddr[2],
maddr[3], maddr[4], maddr[5]);
_os_mem_cpy(drv, wrole->mac_addr, maddr, MAC_ALEN);
sta = rtw_phl_get_stainfo_self(phl_info, wrole);
if (sta) {
mode = PHL_UPD_ROLE_INFO_CHANGE;
_os_mem_cpy(drv, sta->mac_addr, wrole->mac_addr, MAC_ALEN);
pstate = phl_change_stainfo(phl_info, sta, mode);
/*rtw_hal_change_sta_entry(phl_info->hal, sta);*/
}
}
break;
case WR_CHG_AP_PARAM:
{
struct rtw_ap_param *param = (struct rtw_ap_param *)chg_info;
if (wrole->type != PHL_RTYPE_AP)
break;
if (param->cfg_id == CFG_BSS_CLR) {
u32 bsscolor = param->value;
wrole->proto_role_cap.bsscolor = (u8)bsscolor;
rtw_hal_role_cfg_ex(phl_info->hal, wrole, PCFG_BSS_CLR, &bsscolor);
}
/* else if (param->cfg_id == CFG_HIQ_DTIM) */
else {
PHL_INFO("wrole->type(%d) WR_CHG_AP_PARAM todo.....\n", wrole->type);
}
pstate = RTW_PHL_STATUS_SUCCESS;
}
break;
case WR_CHG_EDCA_PARAM:
{
struct rtw_edca_param *param = (struct rtw_edca_param*)chg_info;
hstate = rtw_hal_set_edca(phl_info->hal, wrole, param->ac,
param->param);
if (hstate == RTW_HAL_STATUS_SUCCESS)
pstate = RTW_PHL_STATUS_SUCCESS;
}
break;
case WR_CHG_MU_EDCA_PARAM:
{
struct rtw_mu_edca_param *param = (struct rtw_mu_edca_param*)chg_info;
hstate = rtw_hal_set_mu_edca(phl_info->hal, wrole->hw_band, param->ac,
param->timer, (param->cw & 0x0f), (param->cw>>4),
param->aifsn);
if (hstate == RTW_HAL_STATUS_SUCCESS)
pstate = RTW_PHL_STATUS_SUCCESS;
}
break;
case WR_CHG_MU_EDCA_CFG:
{
u8 val = *(u8 *)chg_info;
hstate = rtw_hal_set_mu_edca_ctrl(phl_info->hal,
wrole->hw_band, wrole->hw_wmm, val);
if (hstate == RTW_HAL_STATUS_SUCCESS)
pstate = RTW_PHL_STATUS_SUCCESS;
}
break;
case WR_CHG_BSS_COLOR:
{
u32 val = *(u8 *)chg_info;
hstate = rtw_hal_role_cfg_ex(phl_info->hal, wrole, PCFG_BSS_CLR, &val);
if (hstate == RTW_HAL_STATUS_SUCCESS)
pstate = RTW_PHL_STATUS_SUCCESS;
}
break;
case WR_CHG_RTS_TH:
{
struct rtw_rts_threshold *val =
(struct rtw_rts_threshold *)chg_info;
hstate = rtw_hal_config_rts_th(phl_info->hal,
wrole->hw_band,
val->rts_time_th,
val->rts_len_th);
if (hstate == RTW_HAL_STATUS_SUCCESS)
pstate = RTW_PHL_STATUS_SUCCESS;
}
break;
case WR_CHG_DFS_HE_TB_CFG:
{
u8 val = *(u8 *)chg_info;
hstate = rtw_hal_set_dfs_tb_ctrl(phl_info->hal, val);
if (hstate == RTW_HAL_STATUS_SUCCESS)
pstate = RTW_PHL_STATUS_SUCCESS;
}
break;
case WR_CHG_TRX_PATH:
{
struct rtw_trx_path_param *param = (struct rtw_trx_path_param*)chg_info;
hstate = rtw_hal_cfg_trx_path(phl_info->hal,
param->tx,
param->tx_nss,
param->rx,
param->rx_nss);
if (hstate == RTW_HAL_STATUS_SUCCESS)
pstate = RTW_PHL_STATUS_SUCCESS;
}
break;
case WR_CHG_MAX:
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_,
"%s: Unsupported case:%d, please check it\n",
__func__, chg_id);
break;
default:
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_,
"%s: unknown chg_id(%d), please check it\n",
__func__, chg_id);
break;
}
return pstate;
}
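/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * chg_info is interpreted according to chg_id, e.g. WR_CHG_BSS_COLOR reads a
 * u8 while WR_CHG_EDCA_PARAM expects a struct rtw_edca_param.
 * example_set_bss_color() is a hypothetical wrapper showing the expected
 * calling pattern for the direct (non-cmd-dispatch) path.
 */
#if 0 /* example only */
static enum rtw_phl_status
example_set_bss_color(struct phl_info_t *phl_info,
		      struct rtw_wifi_role_t *wrole, u8 color)
{
	return phl_wifi_role_change(phl_info, wrole, WR_CHG_BSS_COLOR, &color);
}
#endif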
#ifdef CONFIG_CMD_DISP
struct wr_chg_param {
struct rtw_wifi_role_t *wrole;
enum wr_chg_id id;
u8 *info;
u8 info_len;
};
enum rtw_phl_status
phl_wifi_role_chg_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct wr_chg_param *wr_chg = (struct wr_chg_param *)param;
return phl_wifi_role_change(phl_info, wr_chg->wrole, wr_chg->id, wr_chg->info);
}
void phl_wifi_role_chg_done(void *drv_priv, u8 *cmd, u32 cmd_len,
enum rtw_phl_status status)
{
struct wr_chg_param *wr_chg = NULL;
if (cmd == NULL || cmd_len == 0) {
PHL_ERR("%s buf == NULL || buf_len == 0\n", __func__);
_os_warn_on(1);
return;
}
wr_chg = (struct wr_chg_param *)cmd;
PHL_INFO("%s - id:%d .....\n", __func__, wr_chg->id);
if (wr_chg->info)
_os_kmem_free(drv_priv, wr_chg->info, wr_chg->info_len);
_os_kmem_free(drv_priv, cmd, cmd_len);
cmd = NULL;
}
enum rtw_phl_status
rtw_phl_cmd_wrole_change(void *phl,
struct rtw_wifi_role_t *wrole,
enum wr_chg_id chg_id,
u8 *chg_info,
u8 chg_info_len,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv = phl_to_drvpriv(phl_info);
struct wr_chg_param *wr_chg = NULL;
u32 wr_chg_len = 0;
if (cmd_type == PHL_CMD_DIRECTLY) {
psts = phl_wifi_role_change(phl_info, wrole, chg_id, chg_info);
goto _exit;
}
wr_chg_len = sizeof(struct wr_chg_param);
wr_chg = _os_kmem_alloc(drv, wr_chg_len);
if (wr_chg == NULL) {
PHL_ERR("%s: alloc wr_chg_param failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto _exit;
}
_os_mem_set(drv, wr_chg, 0, wr_chg_len);
wr_chg->wrole = wrole;
wr_chg->id = chg_id;
wr_chg->info_len = chg_info_len;
wr_chg->info = _os_kmem_alloc(drv, chg_info_len);
if (wr_chg->info == NULL) {
PHL_ERR("%s: alloc wr_chg_info failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto _err_info;
}
_os_mem_set(drv, wr_chg->info, 0, chg_info_len);
_os_mem_cpy(drv, wr_chg->info, chg_info, chg_info_len);
psts = phl_cmd_enqueue(phl_info,
wrole->hw_band,
MSG_EVT_ROLE_CHANGE,
(u8*)wr_chg,
wr_chg_len,
phl_wifi_role_chg_done,
cmd_type,
cmd_timeout);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
PHL_INFO("%s wr-id:%d failure\n", __func__, wrole->id);
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
psts = RTW_PHL_STATUS_FAILURE;
PHL_INFO("%s wr-id:%d failure\n", __func__, wrole->id);
goto _err_cmd;
} else {
PHL_INFO("%s wr-id:%d success\n", __func__, wrole->id);
}
return psts;
_err_cmd:
if (wr_chg->info)
_os_kmem_free(drv, wr_chg->info, wr_chg->info_len);
_err_info:
if (wr_chg)
_os_kmem_free(drv, wr_chg, wr_chg_len);
_exit:
return psts;
}
#else
enum rtw_phl_status
rtw_phl_cmd_wrole_change(void *phl,
struct rtw_wifi_role_t *wrole,
enum wr_chg_id chg_id, u8 *chg_info, u8 chg_info_len,
enum phl_cmd_type cmd_type, u32 cmd_timeout)
{
return phl_wifi_role_change((struct phl_info_t *)phl, wrole, chg_id, chg_info);
}
#endif /*CONFIG_CMD_DISP*/
enum rtw_phl_status
_phl_wifi_role_stop(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
wrole->active = false;
_phl_role_notify(phl_info, wrole, PHL_ROLE_STOP);
psts = phl_wifi_role_free_stainfo_hw(phl_info, wrole);
if (psts != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s wr free stainfo_hw failed\n", __func__);
_os_warn_on(1);
}
/*hw port cfg - mac_port_deinit*/
return psts;
}
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_wifi_role_stop_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct rtw_wifi_role_t *wrole = (struct rtw_wifi_role_t *)param;
return _phl_wifi_role_stop(phl_info, wrole);
}
void phl_wifi_role_stop_done(void *drv_priv, u8 *cmd, u32 cmd_len,
enum rtw_phl_status status)
{
if (cmd) {
struct rtw_wifi_role_t *wrole = NULL;
wrole = (struct rtw_wifi_role_t *)cmd;
if (is_cmd_failure(status) && (RTW_PHL_STATUS_SUCCESS != status))
PHL_ERR("%s wrole(%d) fail status(%d).....\n",
__func__, wrole->id, status);
else
PHL_INFO("%s wrole(%d) success.....\n",
__func__, wrole->id);
}
}
enum rtw_phl_status
phl_wifi_role_stop(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
psts = phl_cmd_enqueue(phl_info,
wrole->hw_band,
MSG_EVT_ROLE_STOP,
(u8 *)wrole,
0,
phl_wifi_role_stop_done,
PHL_CMD_WAIT, 0);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
PHL_INFO("%s wr-id:%d failure\n", __func__, wrole->id);
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
psts = RTW_PHL_STATUS_FAILURE;
PHL_INFO("%s wr-id:%d failure\n", __func__, wrole->id);
} else {
PHL_INFO("%s wr-id:%d success\n", __func__, wrole->id);
}
return psts;
}
#else
enum rtw_phl_status
phl_wifi_role_stop(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole)
{
return _phl_wifi_role_stop(phl_info, wrole);
}
#endif
void rtw_phl_wifi_role_free(void *phl, u8 role_idx)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_wifi_role_t *wrole = NULL;
if (role_idx >= MAX_WIFI_ROLE_NUMBER) {
PHL_ERR("%s invalid role index :%d", __func__, role_idx);
return;
}
wrole = &phl_com->wifi_roles[role_idx];
if (phl_wifi_role_stop(phl_info, wrole) != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s role_stop failed :%d", __func__, role_idx);
_os_warn_on(1);
}
if (phl_wifi_role_free_stainfo_sw(phl_info, wrole) != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s wr free stainfo_sw failed\n", __func__);
_os_warn_on(1);
}
_phl_wifi_role_free_sw(phl_info, wrole);
PHL_DUMP_MR_EX(phl_info);
}
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_register_mrc_module(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_info->phl_com);
struct phl_bk_module_ops bk_ops = mr_ctl->bk_ops;
struct phl_cmd_dispatch_engine *disp_eng = &(phl_info->disp_eng);
u8 i = 0;
for(i = 0; i < disp_eng->phy_num; i++)
{
phl_status = phl_disp_eng_register_module(phl_info, i,
PHL_MDL_MRC,
&bk_ops);
if (RTW_PHL_STATUS_SUCCESS != phl_status) {
PHL_ERR("%s register MRC module in cmd disp failed :%d\n", __func__, i+1);
goto error_register_bk;
}
}
return phl_status;
error_register_bk:
while(i > 0){
phl_status = phl_disp_eng_deregister_module(phl_info, --i,
PHL_MDL_MRC);
if (RTW_PHL_STATUS_SUCCESS != phl_status) {
PHL_ERR("%s deregister MRC module in cmd disp failed :%d\n", __func__, i+1);
}
}
return RTW_PHL_STATUS_FAILURE;
}
#endif
enum rtw_phl_status
phl_role_recover(struct phl_info_t *phl_info)
{
u8 role_idx;
struct rtw_wifi_role_t *wrole;
struct rtw_phl_stainfo_t *sta;
enum rtw_phl_status pstatus;
for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
wrole = phl_get_wrole_by_ridx(phl_info, role_idx);
if(wrole == NULL)
continue;
PHL_INFO("%s with role_idx %d\n", __func__, role_idx);
sta = rtw_phl_get_stainfo_self(phl_info, wrole);
if(sta == NULL)
continue;
pstatus = _phl_wifi_role_start(phl_info, wrole, sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s wifi role start failed\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
if (wrole->target_type != PHL_RTYPE_NONE) {
PHL_INFO("%s with role_idx %d change to role type %d\n", __func__, role_idx, wrole->target_type);
phl_wifi_role_change(phl_info, wrole, WR_CHG_TYPE, (u8 *)&wrole->target_type);
wrole->target_type = PHL_RTYPE_NONE;
}
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_cmd_role_recover(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
#ifdef CONFIG_CMD_DISP
pstatus = phl_cmd_enqueue(phl_info, HW_BAND_0, MSG_EVT_ROLE_RECOVER, NULL, 0, NULL, PHL_CMD_WAIT, 0);
if (is_cmd_failure(pstatus)) {
/* Send cmd success, but wait cmd fail*/
pstatus = RTW_PHL_STATUS_FAILURE;
} else if (pstatus != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
pstatus = RTW_PHL_STATUS_FAILURE;
}
#else
pstatus = phl_role_recover(phl_info);
#endif
return pstatus;
}
enum rtw_phl_status
phl_role_suspend(struct phl_info_t *phl_info)
{
u8 role_idx;
struct rtw_wifi_role_t *wrole;
enum rtw_phl_status pstatus;
for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
wrole = phl_get_wrole_by_ridx(phl_info, role_idx);
if(wrole == NULL)
continue;
PHL_INFO("%s with role_idx %d\n", __func__, role_idx);
pstatus = _phl_wifi_role_stop(phl_info, wrole);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s wifi role stop failed\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_cmd_role_suspend(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
#ifdef CONFIG_CMD_DISP
pstatus = phl_cmd_enqueue(phl_info, HW_BAND_0, MSG_EVT_ROLE_SUSPEND, NULL, 0, NULL, PHL_CMD_WAIT, 0);
if (is_cmd_failure(pstatus)) {
/* Send cmd success, but wait cmd fail*/
pstatus = RTW_PHL_STATUS_FAILURE;
} else if (pstatus != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
pstatus = RTW_PHL_STATUS_FAILURE;
}
#else
pstatus = phl_role_suspend(phl_info);
#endif
return pstatus;
}
#ifdef RTW_WKARD_LPS_ROLE_CONFIG
void phl_role_recover_unused_role(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *cur_wrole)
{
u8 role_idx;
struct rtw_wifi_role_t * wrole;
enum rtw_hal_status hstatus;
if (cur_wrole == NULL) {
PHL_ERR("%s cur role is NULL\n", __func__);
return;
}
for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
wrole = phl_get_wrole_by_ridx(phl_info, role_idx);
if(wrole == NULL || !wrole->active)
continue;
if (wrole == cur_wrole)
continue;
hstatus = rtw_hal_role_cfg(phl_info->hal, wrole);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("%s fail to role cfg with id %d\n", __func__, role_idx);
} else {
PHL_INFO("%s recover role with id %d\n", __func__, role_idx);
}
}
}
void phl_role_suspend_unused_role(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *cur_wrole)
{
u8 role_idx;
struct rtw_wifi_role_t *wrole;
u32 func_en = false;
if (cur_wrole == NULL) {
PHL_ERR("%s cur role is NULL\n", __func__);
return;
}
for (role_idx = 0; role_idx < MAX_WIFI_ROLE_NUMBER; role_idx++) {
wrole = phl_get_wrole_by_ridx(phl_info, role_idx);
if(wrole == NULL || !wrole->active)
continue;
if (wrole == cur_wrole)
continue;
PHL_INFO("%s suspend role with id %d\n", __func__, role_idx);
rtw_hal_role_cfg_ex(phl_info->hal, wrole, PCFG_FUNC_SW, &func_en);
}
}
#endif
bool rtw_phl_role_is_client_category(struct rtw_wifi_role_t *wrole)
{
bool ret = false;
if (wrole->type == PHL_RTYPE_STATION || wrole->type == PHL_RTYPE_P2P_GC)
ret = true;
return ret;
}
u16 phl_role_get_bcn_intvl(struct phl_info_t *phl, struct rtw_wifi_role_t *wrole)
{
struct rtw_phl_stainfo_t *sta = rtw_phl_get_stainfo_self(phl, wrole);
u16 bcn_intvl = 0;
if (phl_role_is_client_category(wrole)) {
bcn_intvl = sta->asoc_cap.bcn_interval;
#ifdef RTW_PHL_BCN
} else if (phl_role_is_ap_category(wrole)) {
bcn_intvl = (u16)wrole->bcn_cmn.bcn_interval;
#endif
} else {
PHL_WARN("phl_role_get_bcn_intvl() Unknown category, role id(%d), type(%d)\n",
wrole->id, wrole->type);
}
return bcn_intvl;
}
enum rtw_phl_status
phl_wifi_role_macid_all_pause(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole, bool pause)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
u8 multi_macid = false;
switch (wrole->type) {
case PHL_RTYPE_NONE:
case PHL_RTYPE_STATION:
case PHL_RTYPE_ADHOC:
case PHL_RTYPE_P2P_DEVICE:
case PHL_RTYPE_P2P_GC:
multi_macid = false;
break;
default:
multi_macid = true;
break;
}
if (multi_macid) {
void *drv = phl_to_drvpriv(phl_info);
struct macid_ctl_t *mctrl = phl_to_mac_ctrl(phl_info);
u32 macid_pause[PHL_MACID_MAX_ARRAY_NUM] = {0};
_os_spinlock(phl_to_drvpriv(phl_info), &mctrl->lock, _bh, NULL);
_os_mem_cpy(drv, macid_pause, &mctrl->wifi_role_usedmap[wrole->id][0], PHL_MACID_MAX_ARRAY_NUM);
_os_spinunlock(phl_to_drvpriv(phl_info), &mctrl->lock, _bh, NULL);
hstatus = rtw_hal_set_macid_grp_pause(phl_info->hal, macid_pause, PHL_MACID_MAX_ARRAY_NUM, pause);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_INFO("%s fail(hstatus=%d)\n", __func__, hstatus);
pstatus = RTW_PHL_STATUS_FAILURE;
}
} else {
struct rtw_phl_stainfo_t * self_sta = rtw_phl_get_stainfo_self((void *)phl_info, wrole);
hstatus = rtw_hal_set_macid_pause(phl_info->hal, self_sta->macid, pause);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_INFO("%s fail(hstatus=%d)\n", __func__, hstatus);
pstatus = RTW_PHL_STATUS_FAILURE;
}
}
return pstatus;
}
| 2301_81045437/rtl8852be | phl/phl_role.c | C | agpl-3.0 | 35,164 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_ROLE_H_
#define _PHL_ROLE_H_
#define phl_role_is_ap_category(_wrole) (_wrole->type == PHL_RTYPE_AP || _wrole->type == PHL_RTYPE_P2P_GO)
#define phl_role_is_client_category(_wrole) (_wrole->type == PHL_RTYPE_STATION || _wrole->type == PHL_RTYPE_P2P_GC)
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_register_mrc_module(struct phl_info_t *phl_info);
#endif
struct rtw_wifi_role_t *
phl_get_wrole_by_ridx(struct phl_info_t *phl_info, u8 role_idx);
struct rtw_wifi_role_t *
phl_get_wrole_by_addr(struct phl_info_t *phl_info, u8 *mac_addr);
enum rtw_phl_status
phl_role_noa_notify(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole);
enum rtw_phl_status
phl_role_ap_client_notify(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole, u8 link_sts);
enum rtw_phl_status
phl_role_notify(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole);
enum rtw_phl_status
phl_role_recover(struct phl_info_t *phl_info);
enum rtw_phl_status
phl_cmd_role_recover(struct phl_info_t *phl_info);
enum rtw_phl_status
phl_role_suspend(struct phl_info_t *phl_info);
enum rtw_phl_status
phl_cmd_role_suspend(struct phl_info_t *phl_info);
#ifdef RTW_WKARD_LPS_ROLE_CONFIG
void phl_role_recover_unused_role(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *cur_wrole);
void phl_role_suspend_unused_role(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *cur_wrole);
#endif
#ifdef RTW_PHL_BCN
enum rtw_phl_status
rtw_phl_free_bcn_entry(void *phl, struct rtw_wifi_role_t *wrole);
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_cmd_issue_bcn_hdl(struct phl_info_t *phl_info, u8 *param);
enum rtw_phl_status
phl_cmd_stop_bcn_hdl(struct phl_info_t *phl_info, u8 *param);
#endif
#endif
bool rtw_phl_role_is_client_category(struct rtw_wifi_role_t *wrole);
u16 phl_role_get_bcn_intvl(struct phl_info_t *phl, struct rtw_wifi_role_t *wrole);
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_wifi_role_start_hdl(struct phl_info_t *phl_info, u8 *param);
enum rtw_phl_status
phl_wifi_role_chg_hdl(struct phl_info_t *phl_info, u8 *param);
enum rtw_phl_status
phl_wifi_role_stop_hdl(struct phl_info_t *phl_info, u8 *param);
#endif
enum rtw_phl_status
phl_wifi_role_change(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole,
enum wr_chg_id chg_id, void *chg_info);
enum rtw_phl_status
phl_wifi_role_macid_all_pause(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole, bool pause);
#endif /*_PHL_ROLE_H_*/
| 2301_81045437/rtl8852be | phl/phl_role.h | C | agpl-3.0 | 3,080 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_RX_C_
#include "phl_headers.h"
struct rtw_phl_rx_pkt *rtw_phl_query_phl_rx(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
struct rtw_phl_rx_pkt *phl_rx = NULL;
rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
_os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
if (false == list_empty(&rx_pkt_pool->idle)) {
phl_rx = list_first_entry(&rx_pkt_pool->idle,
struct rtw_phl_rx_pkt, list);
list_del(&phl_rx->list);
rx_pkt_pool->idle_cnt--;
}
_os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
return phl_rx;
}
u8 rtw_phl_is_phl_rx_idle(struct phl_info_t *phl_info)
{
struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
u8 res = false;
rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
_os_spinlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);
if (MAX_PHL_RING_RX_PKT_NUM == rx_pkt_pool->idle_cnt)
res = true;
else
res = false;
_os_spinunlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);
return res;
}
void phl_dump_rx_stats(struct rtw_stats *stats)
{
PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
"Dump Rx statistics\n"
"rx_byte_uni = %lld\n"
"rx_byte_total = %lld\n"
"rx_tp_kbits = %d\n"
"last_rx_time_ms = %d\n",
stats->rx_byte_uni,
stats->rx_byte_total,
stats->rx_tp_kbits,
stats->last_rx_time_ms);
}
void phl_reset_rx_stats(struct rtw_stats *stats)
{
stats->rx_byte_uni = 0;
stats->rx_byte_total = 0;
stats->rx_tp_kbits = 0;
stats->last_rx_time_ms = 0;
stats->rxtp.last_calc_time_ms = 0;
stats->rxtp.last_calc_bits = 0;
stats->rx_traffic.lvl = RTW_TFC_IDLE;
stats->rx_traffic.sts = 0;
stats->rx_tf_cnt = 0;
}
void
phl_rx_traffic_upd(struct rtw_stats *sts)
{
u32 tp_k = 0, tp_m = 0;
enum rtw_tfc_lvl rx_tfc_lvl = RTW_TFC_IDLE;
tp_k = sts->rx_tp_kbits;
tp_m = sts->rx_tp_kbits >> 10;
if (tp_m >= RX_HIGH_TP_THRES_MBPS)
rx_tfc_lvl = RTW_TFC_HIGH;
else if (tp_m >= RX_MID_TP_THRES_MBPS)
rx_tfc_lvl = RTW_TFC_MID;
else if (tp_m >= RX_LOW_TP_THRES_MBPS)
rx_tfc_lvl = RTW_TFC_LOW;
else if (tp_k >= RX_ULTRA_LOW_TP_THRES_KBPS)
rx_tfc_lvl = RTW_TFC_ULTRA_LOW;
else
rx_tfc_lvl = RTW_TFC_IDLE;
if (sts->rx_traffic.lvl > rx_tfc_lvl) {
sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_DECREASE);
sts->rx_traffic.lvl = rx_tfc_lvl;
} else if (sts->rx_traffic.lvl < rx_tfc_lvl) {
sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_INCREASE);
sts->rx_traffic.lvl = rx_tfc_lvl;
} else if (sts->rx_traffic.sts &
(TRAFFIC_CHANGED | TRAFFIC_INCREASE | TRAFFIC_DECREASE)) {
sts->rx_traffic.sts &= ~(TRAFFIC_CHANGED | TRAFFIC_INCREASE |
TRAFFIC_DECREASE);
}
}
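/*
 * Illustrative note (added for clarity): tp_m above is rx_tp_kbits >> 10,
 * i.e. a cheap approximation of kbit/s -> Mbit/s (divide by 1024 instead of
 * 1000), so the *_MBPS thresholds are compared against a slightly
 * conservative Mbit/s figure. Hypothetical helper for the conversion:
 */
#if 0 /* example only: 10240 kbit/s -> 10 */
static u32 example_kbits_to_approx_mbits(u32 tp_kbits)
{
	return tp_kbits >> 10;
}
#endif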
void phl_update_rx_stats(struct rtw_stats *stats, struct rtw_recv_pkt *rx_pkt)
{
u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
u64 diff_bits = 0;
stats->last_rx_time_ms = cur_time;
stats->rx_byte_total += rx_pkt->mdata.pktlen;
if (rx_pkt->mdata.bc == 0 && rx_pkt->mdata.mc == 0)
stats->rx_byte_uni += rx_pkt->mdata.pktlen;
if (0 == stats->rxtp.last_calc_time_ms ||
0 == stats->rxtp.last_calc_bits) {
stats->rxtp.last_calc_time_ms = stats->last_rx_time_ms;
stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
} else {
if (cur_time >= stats->rxtp.last_calc_time_ms) {
diff_t = cur_time - stats->rxtp.last_calc_time_ms;
} else {
diff_t = RTW_U32_MAX - stats->rxtp.last_calc_time_ms +
cur_time + 1;
}
if (diff_t > RXTP_CALC_DIFF_MS && stats->rx_byte_uni != 0) {
diff_bits = (stats->rx_byte_uni * 8) -
stats->rxtp.last_calc_bits;
stats->rx_tp_kbits = (u32)_os_division64(diff_bits,
diff_t);
stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
stats->rxtp.last_calc_time_ms = cur_time;
}
}
}
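/*
 * Illustrative note (added for clarity): diff_bits is in bits and diff_t in
 * milliseconds, so diff_bits / diff_t is already kbit/s and rx_tp_kbits needs
 * no further scaling. example_tp_kbits() is a hypothetical restatement with
 * assumed numbers in the comment.
 */
#if 0 /* example only */
static u32 example_tp_kbits(u64 diff_bits, u32 diff_ms)
{
	/* e.g. 8,000,000 bits over 1,000 ms -> 8,000 kbit/s (~8 Mbit/s) */
	return (u32)_os_division64(diff_bits, diff_ms);
}
#endif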
void phl_rx_statistics(struct phl_info_t *phl_info, struct rtw_recv_pkt *rx_pkt)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_stats *phl_stats = &phl_com->phl_stats;
struct rtw_stats *sta_stats = NULL;
struct rtw_phl_stainfo_t *sta = NULL;
u16 macid = rx_pkt->mdata.macid;
if (!phl_macid_is_valid(phl_info, macid))
goto dev_stat;
sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
if (NULL == sta)
goto dev_stat;
sta_stats = &sta->stats;
phl_update_rx_stats(sta_stats, rx_pkt);
dev_stat:
phl_update_rx_stats(phl_stats, rx_pkt);
}
void phl_release_phl_rx(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx)
{
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
_os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
_os_mem_set(phl_to_drvpriv(phl_info), &phl_rx->r, 0, sizeof(phl_rx->r));
phl_rx->type = RTW_RX_TYPE_MAX;
phl_rx->rxbuf_ptr = NULL;
INIT_LIST_HEAD(&phl_rx->list);
list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
rx_pkt_pool->idle_cnt++;
_os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
}
static void phl_free_recv_pkt_pool(struct phl_info_t *phl_info)
{
struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
u32 buf_len = 0;
FUNCIN();
rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
if (NULL != rx_pkt_pool) {
_os_spinlock_free(phl_to_drvpriv(phl_info),
&rx_pkt_pool->idle_lock);
_os_spinlock_free(phl_to_drvpriv(phl_info),
&rx_pkt_pool->busy_lock);
buf_len = sizeof(*rx_pkt_pool);
_os_mem_free(phl_to_drvpriv(phl_info), rx_pkt_pool, buf_len);
}
FUNCOUT();
}
void phl_rx_deinit(struct phl_info_t *phl_info)
{
/* TODO: rx reorder deinit */
/* TODO: peer info deinit */
phl_free_recv_pkt_pool(phl_info);
}
static enum rtw_phl_status phl_alloc_recv_pkt_pool(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
struct rtw_phl_rx_pkt *phl_rx = NULL;
u32 buf_len = 0, i = 0;
FUNCIN_WSTS(pstatus);
buf_len = sizeof(*rx_pkt_pool);
rx_pkt_pool = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
if (NULL != rx_pkt_pool) {
_os_mem_set(phl_to_drvpriv(phl_info), rx_pkt_pool, 0, buf_len);
INIT_LIST_HEAD(&rx_pkt_pool->idle);
INIT_LIST_HEAD(&rx_pkt_pool->busy);
_os_spinlock_init(phl_to_drvpriv(phl_info),
&rx_pkt_pool->idle_lock);
_os_spinlock_init(phl_to_drvpriv(phl_info),
&rx_pkt_pool->busy_lock);
rx_pkt_pool->idle_cnt = 0;
for (i = 0; i < MAX_PHL_RING_RX_PKT_NUM; i++) {
phl_rx = &rx_pkt_pool->phl_rx[i];
INIT_LIST_HEAD(&phl_rx->list);
list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
rx_pkt_pool->idle_cnt++;
}
phl_info->rx_pkt_pool = rx_pkt_pool;
pstatus = RTW_PHL_STATUS_SUCCESS;
}
if (RTW_PHL_STATUS_SUCCESS != pstatus)
phl_free_recv_pkt_pool(phl_info);
FUNCOUT_WSTS(pstatus);
return pstatus;
}
enum rtw_phl_status phl_rx_init(struct phl_info_t *phl_info)
{
enum rtw_phl_status status;
/* Allocate rx packet pool */
status = phl_alloc_recv_pkt_pool(phl_info);
if (status != RTW_PHL_STATUS_SUCCESS)
return status;
/* TODO: Peer info init */
/* TODO: Rx reorder init */
return RTW_PHL_STATUS_SUCCESS;
}
void phl_recycle_rx_buf(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
struct rtw_rx_buf *rx_buf = NULL;
do {
if (NULL == phl_rx) {
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]phl_rx is NULL!\n");
break;
}
rx_buf = (struct rtw_rx_buf *)phl_rx->rxbuf_ptr;
PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "[4] %s:: [%p]\n",
__FUNCTION__, rx_buf);
if (phl_rx->rxbuf_ptr) {
pstatus = hci_trx_ops->recycle_rx_buf(phl_info, rx_buf,
phl_rx->r.mdata.dma_ch,
phl_rx->type);
}
if (RTW_PHL_STATUS_SUCCESS != pstatus && phl_rx->rxbuf_ptr)
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]recycle hci rx buf error!\n");
phl_release_phl_rx(phl_info, phl_rx);
} while (false);
}
void _phl_indic_new_rxpkt(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct rtw_evt_info_t *evt_info = &phl_info->phl_com->evt_info;
void *drv_priv = phl_to_drvpriv(phl_info);
FUNCIN_WSTS(pstatus);
do {
_os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
evt_info->evt_bitmap |= RTW_PHL_EVT_RX;
_os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
pstatus = phl_schedule_handler(phl_info->phl_com,
&phl_info->phl_event_handler);
} while (false);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING] Trigger rx indic event fail!\n");
FUNCOUT_WSTS(pstatus);
#ifdef PHL_RX_BATCH_IND
phl_info->rx_new_pending = 0;
#endif
}
void _phl_record_rx_stats(struct rtw_recv_pkt *recvpkt)
{
if(NULL == recvpkt)
return;
if (recvpkt->tx_sta)
recvpkt->tx_sta->stats.rx_rate = recvpkt->mdata.rx_rate;
}
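/*
* Producer side of the phl rx ring: phl_idx is the write index and
* core_idx is the read index. A sketch of the wrap handling, assuming
* MAX_PHL_RING_ENTRY_NUM is 4096 (as the literal 4096 in
* dump_phl_rx_ring() below suggests): when the current write index is
* 4095 and the ring still has room, the new slot is entry[0] and
* phl_idx is reset to 0 via _os_atomic_set() instead of being
* incremented.
*/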
enum rtw_phl_status _phl_add_rx_pkt(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
struct rtw_recv_pkt *recvpkt = &phl_rx->r;
u16 ring_res = 0, wptr = 0, rptr = 0;
void *drv = phl_to_drvpriv(phl_info);
FUNCIN_WSTS(pstatus);
_os_spinlock(drv, &phl_info->rx_ring_lock, _bh, NULL);
if (!ring)
goto out;
wptr = (u16)_os_atomic_read(drv, &ring->phl_idx);
rptr = (u16)_os_atomic_read(drv, &ring->core_idx);
ring_res = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RING_ENTRY_NUM);
PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
"[3] _phl_add_rx_pkt::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
_os_atomic_read(drv, &ring->phl_idx),
_os_atomic_read(drv, &ring->core_idx),
ring_res);
if (ring_res <= 0) {
PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no ring resource to add new rx pkt!\n");
pstatus = RTW_PHL_STATUS_RESOURCE;
goto out;
}
wptr = wptr + 1;
if (wptr >= MAX_PHL_RING_ENTRY_NUM)
wptr = 0;
ring->entry[wptr] = recvpkt;
if (wptr)
_os_atomic_inc(drv, &ring->phl_idx);
else
_os_atomic_set(drv, &ring->phl_idx, 0);
#ifdef PHL_RX_BATCH_IND
phl_info->rx_new_pending = 1;
pstatus = RTW_PHL_STATUS_SUCCESS;
#endif
out:
_os_spinunlock(drv, &phl_info->rx_ring_lock, _bh, NULL);
if(pstatus == RTW_PHL_STATUS_SUCCESS)
_phl_record_rx_stats(recvpkt);
FUNCOUT_WSTS(pstatus);
return pstatus;
}
void
phl_sta_ps_enter(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
struct rtw_wifi_role_t *role)
{
void *d = phl_to_drvpriv(phl_info);
/* enum rtw_hal_status hal_status; */
struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
_os_atomic_set(d, &sta->ps_sta, 1);
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
"STA %02X:%02X:%02X:%02X:%02X:%02X enters PS mode, AID=%u, macid=%u, sta=0x%p\n",
sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
sta->aid, sta->macid, sta);
/* TODO: commented out because the beacon may stop if we do this frequently */
/* hal_status = rtw_hal_set_macid_pause(phl_info->hal, */
/* sta->macid, true); */
/* if (RTW_HAL_STATUS_SUCCESS != hal_status) { */
/* PHL_WARN("%s(): failed to pause macid tx, macid=%u\n", */
/* __FUNCTION__, sta->macid); */
/* } */
if (ops->ap_ps_sta_ps_change)
ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, true);
}
void
phl_sta_ps_exit(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
struct rtw_wifi_role_t *role)
{
void *d = phl_to_drvpriv(phl_info);
/* enum rtw_hal_status hal_status; */
struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
"STA %02X:%02X:%02X:%02X:%02X:%02X leaves PS mode, AID=%u, macid=%u, sta=0x%p\n",
sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
sta->aid, sta->macid, sta);
_os_atomic_set(d, &sta->ps_sta, 0);
/* TODO: commented out because the beacon may stop if we do this frequently */
/* hal_status = rtw_hal_set_macid_pause(phl_info->hal, */
/* sta->macid, false); */
/* if (RTW_HAL_STATUS_SUCCESS != hal_status) { */
/* PHL_WARN("%s(): failed to resume macid tx, macid=%u\n", */
/* __FUNCTION__, sta->macid); */
/* } */
if (ops->ap_ps_sta_ps_change)
ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, false);
}
void
phl_rx_handle_sta_process(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *rx)
{
struct rtw_r_meta_data *m = &rx->r.mdata;
struct rtw_wifi_role_t *role = NULL;
struct rtw_phl_stainfo_t *sta = NULL;
void *d = phl_to_drvpriv(phl_info);
if (!phl_info->phl_com->dev_sw_cap.ap_ps)
return;
if (m->addr_cam_vld) {
sta = rtw_phl_get_stainfo_by_macid(phl_info, m->macid);
if (sta && sta->wrole)
role = sta->wrole;
}
if (!sta) {
role = phl_get_wrole_by_addr(phl_info, m->mac_addr);
if (role)
sta = rtw_phl_get_stainfo_by_addr(phl_info,
role, m->ta);
}
if (!role || !sta)
return;
rx->r.tx_sta = sta;
rx->r.rx_role = role;
PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_,
"ap-ps: more_frag=%u, frame_type=%u, role_type=%d, pwr_bit=%u, seq=%u\n",
m->more_frag, m->frame_type, role->type, m->pwr_bit, m->seq);
/*
* Change STA PS state based on the PM bit in frame control
*/
if (!m->more_frag &&
(m->frame_type == RTW_FRAME_TYPE_DATA ||
m->frame_type == RTW_FRAME_TYPE_CTRL) &&
(role->type == PHL_RTYPE_AP ||
role->type == PHL_RTYPE_P2P_GO)) {
if (_os_atomic_read(d, &sta->ps_sta)) {
if (!m->pwr_bit)
phl_sta_ps_exit(phl_info, sta, role);
} else {
if (m->pwr_bit)
phl_sta_ps_enter(phl_info, sta, role);
}
}
}
void
phl_handle_rx_frame_list(struct phl_info_t *phl_info,
_os_list *frames)
{
struct rtw_phl_rx_pkt *pos, *n;
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
phl_list_for_loop_safe(pos, n, struct rtw_phl_rx_pkt, frames, list) {
list_del(&pos->list);
phl_rx_handle_sta_process(phl_info, pos);
status = _phl_add_rx_pkt(phl_info, pos);
if (RTW_PHL_STATUS_RESOURCE == status) {
hci_trx_ops->recycle_rx_pkt(phl_info, pos);
}
}
#ifndef PHL_RX_BATCH_IND
_phl_indic_new_rxpkt(phl_info);
#endif
}
#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff
static inline int seq_less(u16 sq1, u16 sq2)
{
return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}
static inline u16 seq_inc(u16 sq)
{
return (sq + 1) & SEQ_MASK;
}
static inline u16 seq_sub(u16 sq1, u16 sq2)
{
return (sq1 - sq2) & SEQ_MASK;
}
static inline u16 reorder_index(struct phl_tid_ampdu_rx *r, u16 seq)
{
return seq_sub(seq, r->ssn) % r->buf_size;
}
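/*
* Worked example of the 12-bit sequence arithmetic (illustrative values,
* added for clarity): with SEQ_MODULO = 0x1000 and SEQ_MASK = 0xfff,
* seq_less(0xffe, 0x001) is true, since (0xffe - 0x001) & 0xfff = 0xffd > 0x800
* seq_inc(0xfff) == 0x000
* seq_sub(0x001, 0xffe) == 0x003
* and with r->ssn = 0xffe and r->buf_size = 64, reorder_index(r, 0x002) == 4.
*/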
static void phl_release_reorder_frame(struct phl_info_t *phl_info,
struct phl_tid_ampdu_rx *r,
int index, _os_list *frames)
{
struct rtw_phl_rx_pkt *pkt = r->reorder_buf[index];
if (!pkt)
goto out;
/* release the frame from the reorder ring buffer */
r->stored_mpdu_num--;
r->reorder_buf[index] = NULL;
list_add_tail(&pkt->list, frames);
out:
r->head_seq_num = seq_inc(r->head_seq_num);
}
#define HT_RX_REORDER_BUF_TIMEOUT_MS 100
/*
* If the MPDU at head_seq_num is ready,
* 1. release all subsequent MPDUs with consecutive SN and
* 2. if there's an MPDU that is ready but left in the reordering
* buffer, find it and set reorder timer according to its reorder
* time
*
* If the MPDU at head_seq_num is not ready and there is no MPDU ready
* in the buffer at all, return.
*
* If the MPDU at head_seq_num is not ready but there is some MPDU in
* the buffer that is ready, check whether any frames in the reorder
* buffer have timed out in the following way.
*
* Basically, MPDUs that are not ready are purged and MPDUs that are
* ready are released.
*
* The process goes through all the buffer but the one at head_seq_num
* unless
* - there's an MPDU that is ready AND
* - there are one or more buffers that are not ready.
* In this case, the process is stopped, the head_seq_num becomes the
* first buffer that is not ready and the reorder_timer is reset based
* on the reorder_time of that ready MPDU.
*/
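/*
* Illustrative walk-through of the timeout path (example values are
* assumptions, not from the original source): with buf_size = 8,
* head_seq_num = 10 and only the slot for seq 13 filled, the head slot
* is empty while stored_mpdu_num != 0, so the loop below scans the
* following slots:
* - the slots for seq 11 and 12 are empty, so skipped grows to 3
* (it starts at 1 to account for the head slot itself)
* - the slot for seq 13 is filled; if that frame has not yet aged past
* HT_RX_REORDER_BUF_TIMEOUT_MS the reorder timer is (re)armed and the
* function returns, otherwise seq 13 is released and head_seq_num ends
* up at 14 (one step from the release plus the 3 skipped slots).
*/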
static void phl_reorder_release(struct phl_info_t *phl_info,
struct phl_tid_ampdu_rx *r, _os_list *frames)
{
/* ref ieee80211_sta_reorder_release() and wil_reorder_release() */
int index, i, j;
u32 cur_time = _os_get_cur_time_ms();
/* release the buffer until next missing frame */
index = reorder_index(r, r->head_seq_num);
if (!r->reorder_buf[index] && r->stored_mpdu_num) {
/*
* No buffers ready to be released, but check whether any
* frames in the reorder buffer have timed out.
*/
int skipped = 1;
for (j = (index + 1) % r->buf_size; j != index;
j = (j + 1) % r->buf_size) {
if (!r->reorder_buf[j]) {
skipped++;
continue;
}
if (skipped && cur_time < r->reorder_time[j] +
HT_RX_REORDER_BUF_TIMEOUT_MS)
goto set_release_timer;
/* don't leave incomplete A-MSDUs around */
for (i = (index + 1) % r->buf_size; i != j;
i = (i + 1) % r->buf_size)
phl_recycle_rx_buf(phl_info, r->reorder_buf[i]);
PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "release an RX reorder frame due to timeout on earlier frames\n");
phl_release_reorder_frame(phl_info, r, j, frames);
/*
* Increment the head seq# also for the skipped slots.
*/
r->head_seq_num =
(r->head_seq_num + skipped) & SEQ_MASK;
skipped = 0;
}
} else while (r->reorder_buf[index]) {
phl_release_reorder_frame(phl_info, r, index, frames);
index = reorder_index(r, r->head_seq_num);
}
if (r->stored_mpdu_num) {
j = index = r->head_seq_num % r->buf_size;
for (; j != (index - 1) % r->buf_size;
j = (j + 1) % r->buf_size) {
if (r->reorder_buf[j])
break;
}
set_release_timer:
if (!r->removed)
_os_set_timer(r->drv_priv, &r->sta->reorder_timer,
HT_RX_REORDER_BUF_TIMEOUT_MS);
} else {
/* TODO: the cancel-timer implementation on Linux is
del_timer_sync(); it can't be called while holding the same spinlock
that the expiration callback takes, as that causes a potential
deadlock. */
_os_cancel_timer_async(r->drv_priv, &r->sta->reorder_timer);
}
}
void phl_sta_rx_reorder_timer_expired(void *t)
{
/* ref sta_rx_agg_reorder_timer_expired() */
struct rtw_phl_stainfo_t *sta = (struct rtw_phl_stainfo_t *)t;
struct rtw_phl_com_t *phl_com = sta->wrole->phl_com;
struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
void *drv_priv = phl_to_drvpriv(phl_info);
u8 i = 0;
PHL_INFO("Rx reorder timer expired, sta=0x%p\n", sta);
for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
_os_list frames;
INIT_LIST_HEAD(&frames);
_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
if (sta->tid_rx[i])
phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
phl_handle_rx_frame_list(phl_info, &frames);
#ifdef PHL_RX_BATCH_IND
_phl_indic_new_rxpkt(phl_info);
#endif
}
_os_event_set(drv_priv, &sta->comp_sync);
}
static void phl_release_reorder_frames(struct phl_info_t *phl_info,
struct phl_tid_ampdu_rx *r,
u16 head_seq_num, _os_list *frames)
{
/* ref ieee80211_release_reorder_frames() and
wil_release_reorder_frames() */
int index;
/* note: this function is never called with
* head_seq_num preceding r->head_seq_num, i.e. it always holds that
* !seq_less(head_seq_num, r->head_seq_num)
* and thus on loop exit it should be
* r->head_seq_num == head_seq_num
*/
while (seq_less(r->head_seq_num, head_seq_num) &&
r->stored_mpdu_num) { /* Note: do we need to check this? */
index = reorder_index(r, r->head_seq_num);
phl_release_reorder_frame(phl_info, r, index, frames);
}
r->head_seq_num = head_seq_num;
}
static bool phl_manage_sta_reorder_buf(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *pkt,
struct phl_tid_ampdu_rx *r,
_os_list *frames)
{
/* ref ieee80211_sta_manage_reorder_buf() and wil_rx_reorder() */
struct rtw_r_meta_data *meta = &pkt->r.mdata;
u16 mpdu_seq_num = meta->seq;
u16 head_seq_num, buf_size;
int index;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
buf_size = r->buf_size;
head_seq_num = r->head_seq_num;
/*
* If the current MPDU's SN is smaller than the SSN, it shouldn't
* be reordered.
*/
if (!r->started) {
if (seq_less(mpdu_seq_num, head_seq_num))
return false;
r->started = true;
}
/* frame with out of date sequence number */
if (seq_less(mpdu_seq_num, head_seq_num)) {
PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
meta->seq, r->head_seq_num);
hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
return true;
}
/*
* If the frame's sequence number exceeds our buffering window,
* release some previous frames to make room for this one.
*/
if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
/* release stored frames up to new head to stack */
phl_release_reorder_frames(phl_info, r, head_seq_num, frames);
}
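/*
* Example (illustrative values): with head_seq_num = 0x100 and
* buf_size = 64, an incoming seq of 0x150 lies outside [0x100, 0x140),
* so the head is pulled up to seq_inc(seq_sub(0x150, 64)) = 0x111 and
* everything older is flushed before the new frame is stored below.
*/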
/* Now the new frame is always in the range of the reordering buffer */
index = reorder_index(r, mpdu_seq_num);
/* check if we already stored this frame */
if (r->reorder_buf[index]) {
PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
meta->seq, r->head_seq_num);
hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
return true;
}
/*
* If the current MPDU is in the right order and nothing else
* is stored we can process it directly, no need to buffer it.
* If it is first but there's something stored, we may be able
* to release frames after this one.
*/
if (mpdu_seq_num == r->head_seq_num &&
r->stored_mpdu_num == 0) {
r->head_seq_num = seq_inc(r->head_seq_num);
return false;
}
/* put the frame in the reordering buffer */
r->reorder_buf[index] = pkt;
r->reorder_time[index] = _os_get_cur_time_ms();
r->stored_mpdu_num++;
phl_reorder_release(phl_info, r, frames);
return true;
}
enum rtw_phl_status phl_rx_reorder(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx,
_os_list *frames)
{
/* ref wil_rx_reorder() and ieee80211_rx_reorder_ampdu() */
void *drv_priv = phl_to_drvpriv(phl_info);
struct rtw_r_meta_data *meta = &phl_rx->r.mdata;
u16 tid = meta->tid;
struct rtw_phl_stainfo_t *sta = NULL;
struct phl_tid_ampdu_rx *r;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
/*
* Remove FCS if it is appended
* TODO: handle more than one in pkt_list
*/
if (phl_info->phl_com->append_fcs) {
/*
* Only the last MSDU of an A-MSDU includes the FCS.
* TODO: if A-MSDU cut processing is done in HAL, the FCS should only
* be deducted from the length of the last entry of pkt_list. For that
* case, phl_rx->r should carry the pkt_list length.
*/
if (!(meta->amsdu_cut && !meta->last_msdu)) {
if (phl_rx->r.pkt_list[0].length <= 4) {
PHL_ERR("%s, pkt_list[0].length(%d) too short\n",
__func__, phl_rx->r.pkt_list[0].length);
goto drop_frame;
}
phl_rx->r.pkt_list[0].length -= 4;
}
}
if (meta->bc || meta->mc)
goto dont_reorder;
if (!meta->qos)
goto dont_reorder;
if (meta->q_null)
goto dont_reorder;
/* TODO: check ba policy is either ba or normal */
/* if the mpdu is fragmented, don't reorder */
if (meta->more_frag || meta->frag_num) {
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
"Receive QoS Data with more_frag=%u, frag_num=%u\n",
meta->more_frag, meta->frag_num);
goto dont_reorder;
}
/* Use MAC ID from address CAM if this packet is address CAM matched */
if (meta->addr_cam_vld)
sta = rtw_phl_get_stainfo_by_macid(phl_info, meta->macid);
/* Otherwise, search STA by TA */
if (!sta || !sta->wrole) {
struct rtw_wifi_role_t *wrole;
wrole = phl_get_wrole_by_addr(phl_info, meta->mac_addr);
if (wrole)
sta = rtw_phl_get_stainfo_by_addr(phl_info,
wrole, meta->ta);
if (!wrole || !sta) {
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
"%s(): stainfo or wrole not found, cam=%u, macid=%u\n",
__FUNCTION__, meta->addr_cam, meta->macid);
goto dont_reorder;
}
}
phl_rx->r.tx_sta = sta;
phl_rx->r.rx_role = sta->wrole;
rtw_hal_set_sta_rx_sts(sta, false, meta);
if (tid >= ARRAY_SIZE(sta->tid_rx)) {
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "Fail: tid (%u) index out of range (%u)\n", tid, 8);
goto drop_frame;
}
_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
r = sta->tid_rx[tid];
if (!r) {
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
goto dont_reorder;
}
if (!phl_manage_sta_reorder_buf(phl_info, phl_rx, r, frames)) {
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
goto dont_reorder;
}
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
return RTW_PHL_STATUS_SUCCESS;
drop_frame:
hci_trx_ops->recycle_rx_pkt(phl_info, phl_rx);
return RTW_PHL_STATUS_FAILURE;
dont_reorder:
list_add_tail(&phl_rx->list, frames);
return RTW_PHL_STATUS_SUCCESS;
}
u8 phl_check_recv_ring_resource(struct phl_info_t *phl_info)
{
struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
u16 avail = 0, wptr = 0, rptr = 0;
void *drv_priv = phl_to_drvpriv(phl_info);
wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
avail = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RING_ENTRY_NUM);
if (0 == avail)
return false;
else
return true;
}
void dump_phl_rx_ring(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
s16 diff = 0;
u16 idx = 0, endidx = 0;
u16 phl_idx = 0, core_idx = 0;
PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "===Dump PHL RX Ring===\n");
phl_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.phl_idx);
core_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.core_idx);
PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
"core_idx = %d\n"
"phl_idx = %d\n",
core_idx,
phl_idx);
diff = phl_idx - core_idx;
if (diff < 0)
diff = 4096 + diff;
endidx = (diff > 5) ? (core_idx + 6) : phl_idx;
for (idx = core_idx + 1; idx < endidx; idx++) {
PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "entry[%d] = %p\n", idx,
phl_info->phl_rx_ring.entry[idx % 4096]);
}
}
void phl_event_indicator(void *context)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_handler *phl_handler
= (struct rtw_phl_handler *)phl_container_of(context,
struct rtw_phl_handler,
os_handler);
struct phl_info_t *phl_info = (struct phl_info_t *)phl_handler->context;
struct rtw_phl_evt_ops *ops = NULL;
struct rtw_evt_info_t *evt_info = NULL;
void *drv_priv = NULL;
enum rtw_phl_evt evt_bitmap = 0;
FUNCIN_WSTS(sts);
if (NULL != phl_info) {
ops = &phl_info->phl_com->evt_ops;
evt_info = &phl_info->phl_com->evt_info;
drv_priv = phl_to_drvpriv(phl_info);
_os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
evt_bitmap = evt_info->evt_bitmap;
evt_info->evt_bitmap = 0;
_os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
if (RTW_PHL_EVT_RX & evt_bitmap) {
if (NULL != ops->rx_process) {
sts = ops->rx_process(drv_priv);
}
dump_phl_rx_ring(phl_info);
}
}
FUNCOUT_WSTS(sts);
}
void _phl_rx_statistics_reset(struct phl_info_t *phl_info)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_phl_stainfo_t *sta = NULL;
struct rtw_wifi_role_t *role = NULL;
void *drv = phl_to_drvpriv(phl_info);
struct phl_queue *sta_queue;
u8 i;
for (i = 0; i< MAX_WIFI_ROLE_NUMBER; i++) {
role = &phl_com->wifi_roles[i];
if (role->active && (role->mstate == MLME_LINKED)) {
sta_queue = &role->assoc_sta_queue;
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
&sta_queue->queue, list) {
if (sta)
rtw_hal_set_sta_rx_sts(sta, true, NULL);
}
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
}
}
}
void
phl_rx_watchdog(struct phl_info_t *phl_info)
{
struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
phl_rx_traffic_upd(phl_stats);
phl_dump_rx_stats(phl_stats);
_phl_rx_statistics_reset(phl_info);
}
u16 rtw_phl_query_new_rx_num(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_rx_ring *ring = NULL;
u16 new_rx = 0, wptr = 0, rptr = 0;
if (NULL != phl_info) {
ring = &phl_info->phl_rx_ring;
wptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
&ring->phl_idx);
rptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
&ring->core_idx);
new_rx = phl_calc_avail_rptr(rptr, wptr,
MAX_PHL_RING_ENTRY_NUM);
}
return new_rx;
}
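/*
* Consumer side of the phl rx ring: advance core_idx (wrapping to 0 at
* MAX_PHL_RING_ENTRY_NUM, mirroring the producer in _phl_add_rx_pkt())
* and return the entry it now points at. The caller hands the packet
* back through rtw_phl_return_rxbuf() when it is done with it.
*/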
struct rtw_recv_pkt *rtw_phl_query_rx_pkt(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_rx_ring *ring = NULL;
struct rtw_recv_pkt *recvpkt = NULL;
void *drv_priv = NULL;
u16 ring_res = 0, wptr = 0, rptr = 0;
if (NULL != phl_info) {
ring = &phl_info->phl_rx_ring;
drv_priv = phl_to_drvpriv(phl_info);
wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
ring_res = phl_calc_avail_rptr(rptr, wptr,
MAX_PHL_RING_ENTRY_NUM);
PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
"[4] %s::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
__FUNCTION__,
_os_atomic_read(drv_priv, &ring->phl_idx),
_os_atomic_read(drv_priv, &ring->core_idx),
ring_res);
if (ring_res > 0) {
rptr = rptr + 1;
if (rptr >= MAX_PHL_RING_ENTRY_NUM) {
rptr = 0;
recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
ring->entry[rptr] = NULL;
_os_atomic_set(drv_priv, &ring->core_idx, 0);
} else {
recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
ring->entry[rptr] = NULL;
_os_atomic_inc(drv_priv, &ring->core_idx);
}
if (NULL == recvpkt)
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "recvpkt is NULL!\n");
else
phl_rx_statistics(phl_info, recvpkt);
} else {
PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no available rx packet to query!\n");
}
}
return recvpkt;
}
enum rtw_phl_status rtw_phl_return_rxbuf(void *phl, u8* recvpkt)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_rx_pkt *phl_rx = NULL;
struct rtw_recv_pkt *r = (struct rtw_recv_pkt *)recvpkt;
do {
if (NULL == recvpkt)
break;
phl_rx = phl_container_of(r, struct rtw_phl_rx_pkt, r);
phl_recycle_rx_buf(phl_info, phl_rx);
pstatus = RTW_PHL_STATUS_SUCCESS;
} while (false);
return pstatus;
}
enum rtw_phl_status rtw_phl_start_rx_process(void *phl)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
FUNCIN_WSTS(pstatus);
pstatus = phl_schedule_handler(phl_info->phl_com,
&phl_info->phl_rx_handler);
FUNCOUT_WSTS(pstatus);
return pstatus;
}
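/*
* Handle a received Block Ack Request for the given sta/tid: frames
* with a sequence number below the BAR starting sequence are flushed
* out of the reorder buffer to the rx path and head_seq_num is moved
* up to that value.
*/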
void rtw_phl_rx_bar(void *phl, struct rtw_phl_stainfo_t *sta, u8 tid, u16 seq)
{
/* ref ieee80211_rx_h_ctrl() and wil_rx_bar() */
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_tid_ampdu_rx *r;
_os_list frames;
INIT_LIST_HEAD(&frames);
_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
r = sta->tid_rx[tid];
if (!r) {
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR for non-existing TID %d\n", tid);
goto out;
}
if (seq_less(seq, r->head_seq_num)) {
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR Seq 0x%03x preceding head 0x%03x\n",
seq, r->head_seq_num);
goto out;
}
PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "BAR: TID %d Seq 0x%03x head 0x%03x\n",
tid, seq, r->head_seq_num);
phl_release_reorder_frames(phl_info, r, seq, &frames);
phl_handle_rx_frame_list(phl_info, &frames);
out:
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
}
enum rtw_phl_status
rtw_phl_enter_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_hal_status status;
status = rtw_hal_enter_mon_mode(phl_info->hal, wrole->hw_band);
if (status != RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
"%s(): rtw_hal_enter_mon_mode() failed, status=%d",
__FUNCTION__, status);
return RTW_PHL_STATUS_FAILURE;
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
rtw_phl_leave_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_hal_status status;
status = rtw_hal_leave_mon_mode(phl_info->hal, wrole->hw_band);
if (status != RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
"%s(): rtw_hal_leave_mon_mode() failed, status=%d",
__FUNCTION__, status);
return RTW_PHL_STATUS_FAILURE;
}
return RTW_PHL_STATUS_SUCCESS;
}
#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
void
_phl_rx_proc_frame_list(struct phl_info_t *phl_info, struct phl_queue *pq)
{
void *d = phl_to_drvpriv(phl_info);
_os_list *pkt_list = NULL;
struct rtw_phl_rx_pkt *phl_rx = NULL;
if (NULL == pq)
return;
if (0 == pq->cnt)
return;
PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
"_phl_rx_proc_frame_list : queue ele cnt = %d\n",
pq->cnt);
while (true == pq_pop(d, pq, &pkt_list, _first, _bh)) {
phl_rx = (struct rtw_phl_rx_pkt *)pkt_list;
phl_info->hci_trx_ops->rx_handle_normal(phl_info, phl_rx);
}
}
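/*
* Attach the phy info carried by a PPDU status packet to the rx
* packets previously buffered by phl_rx_proc_wait_phy_sts() for the
* same band/ppdu_cnt, then hand that frame list to the normal rx path
* and reset the queue.
*/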
enum rtw_phl_status
phl_rx_proc_phy_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *ppdu_sts)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
struct rtw_phl_rx_pkt *phl_rx = NULL;
void *d = phl_to_drvpriv(phl_info);
struct rtw_phl_rssi_stat *rssi_stat = &phl_info->phl_com->rssi_stat;
_os_list *frame = NULL;
bool upt_psts = true;
u8 i = 0;
enum phl_band_idx band = HW_BAND_0;
if (NULL == ppdu_sts)
return pstatus;
if (false == psts_info->en_psts_per_pkt) {
return pstatus;
}
if (ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
"ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!\n");
return pstatus;
}
band = (ppdu_sts->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
if (false == psts_info->en_ppdu_sts[band])
return pstatus;
if (ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt[band]) {
PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
"ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt!\n");
upt_psts = false;
}
sts_entry = &psts_info->sts_ent[band][psts_info->cur_ppdu_cnt[band]];
/* check list empty */
if (0 == sts_entry->frames.cnt) {
PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
"cur_ppdu_cnt %d --> sts_entry->frames.cnt = 0\n",
psts_info->cur_ppdu_cnt[band]);
pstatus = RTW_PHL_STATUS_SUCCESS;
return pstatus;
}
/* start update phy info to per pkt*/
if (false == pq_get_front(d, &sts_entry->frames, &frame, _bh)) {
PHL_ERR(" %s list empty\n", __FUNCTION__);
return pstatus;
}
/**
* TODO: how to filter the case where
* pkt(ppdu_cnt = 0) --> its psts(ppdu_cnt = 0) goes missing --> (all of those pkts/psts are dropped/missing)
* --> a later ppdu_sts(ppdu_cnt = 0) arrives that is not for the currently buffered pkt.
* Workaround: compare rate/bw/ppdu_type/... etc.
**/
phl_rx = (struct rtw_phl_rx_pkt *)frame;
if (upt_psts &&
((phl_rx->r.mdata.rx_rate != ppdu_sts->r.mdata.rx_rate) ||
(phl_rx->r.mdata.bw != ppdu_sts->r.mdata.bw) ||
(phl_rx->r.mdata.rx_gi_ltf != ppdu_sts->r.mdata.rx_gi_ltf) ||
(phl_rx->r.mdata.ppdu_type != ppdu_sts->r.mdata.ppdu_type))) {
/**
* ppdu status is not for the buffered pkt,
* skip update phy status to phl_rx
**/
upt_psts = false;
}
/* Get Frame Type */
ppdu_sts->r.phy_info.frame_type =
PHL_GET_80211_HDR_TYPE(phl_rx->r.pkt_list[0].vir_addr);
if ((false == ppdu_sts->r.phy_info.is_valid) &&
(true == psts_info->en_fake_psts)) {
if (RTW_FRAME_TYPE_MGNT == phl_rx->r.mdata.frame_type) {
ppdu_sts->r.phy_info.rssi =
rssi_stat->ma_rssi[RTW_RSSI_MGNT_ACAM_A1M];
} else if (RTW_FRAME_TYPE_DATA == phl_rx->r.mdata.frame_type) {
ppdu_sts->r.phy_info.rssi =
rssi_stat->ma_rssi[RTW_RSSI_DATA_ACAM_A1M];
} else if (RTW_FRAME_TYPE_CTRL == phl_rx->r.mdata.frame_type) {
ppdu_sts->r.phy_info.rssi =
rssi_stat->ma_rssi[RTW_RSSI_CTRL_ACAM_A1M];
} else {
ppdu_sts->r.phy_info.rssi =
rssi_stat->ma_rssi[RTW_RSSI_UNKNOWN];
}
for(i = 0; i< RTW_PHL_MAX_RF_PATH ; i++) {
ppdu_sts->r.phy_info.rssi_path[i] =
ppdu_sts->r.phy_info.rssi;
}
ppdu_sts->r.phy_info.ch_idx = rtw_hal_get_cur_ch(phl_info->hal,
phl_rx->r.mdata.bb_sel);
ppdu_sts->r.phy_info.is_valid = true;
}
do {
if (false == upt_psts)
break;
phl_rx = (struct rtw_phl_rx_pkt *)frame;
_os_mem_cpy(d, &(phl_rx->r.phy_info), &(ppdu_sts->r.phy_info),
sizeof(struct rtw_phl_ppdu_phy_info));
} while ((true == psts_info->psts_ampdu) &&
(pq_get_next(d, &sts_entry->frames, frame, &frame, _bh)));
/*2. indicate the frame list*/
_phl_rx_proc_frame_list(phl_info, &sts_entry->frames);
/*3. reset the queue */
pq_reset(d, &(sts_entry->frames), _bh);
return pstatus;
}
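/*
* Decide whether an rx packet has to wait for its PPDU status before
* being indicated (per-packet phy status mode). Returns true when the
* packet was queued into the per-band, per-ppdu_cnt frame list and
* will be released later by phl_rx_proc_phy_sts(); returns false when
* the caller should handle the packet immediately.
*/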
bool
phl_rx_proc_wait_phy_sts(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx)
{
struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
void *d = phl_to_drvpriv(phl_info);
u8 i = 0;
bool ret = false;
enum phl_band_idx band = HW_BAND_0;
if (false == psts_info->en_psts_per_pkt) {
return ret;
}
if (phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
PHL_ASSERT("phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!");
return ret;
}
band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
if (false == psts_info->en_ppdu_sts[band])
return ret;
if (psts_info->cur_ppdu_cnt[band] != phl_rx->r.mdata.ppdu_cnt) {
/* start of PPDU */
/* 1. Check all of the buffer list is empty */
/* only check the target rx pkt band */
for (i = 0; i < PHL_MAX_PPDU_CNT; i++) {
sts_entry = &psts_info->sts_ent[band][i];
if (0 != sts_entry->frames.cnt) {
/* need indicate first */
PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
"band %d ; ppdu_cnt %d queue is not empty \n",
band, i);
_phl_rx_proc_frame_list(phl_info,
&sts_entry->frames);
pq_reset(d, &(sts_entry->frames), _bh);
}
}
/* 2. check ppdu status filter condition */
/* Filter function is supported only if rxd is long_rxd */
if ((1 == phl_rx->r.mdata.long_rxd) &&
(0 != (psts_info->ppdu_sts_filter &
BIT(phl_rx->r.mdata.frame_type)))) {
/* 3. add new rx pkt to the tail of the queue */
sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
pq_reset(d, &(sts_entry->frames), _bh);
pq_push(d, &(sts_entry->frames), &phl_rx->list,
_tail, _bh);
ret = true;
}
psts_info->cur_ppdu_cnt[band] = phl_rx->r.mdata.ppdu_cnt;
} else {
/* 1. check ppdu status filter condition */
/* Filter function is supported only if rxd is long_rxd */
if ((1 == phl_rx->r.mdata.long_rxd) &&
(0 != (psts_info->ppdu_sts_filter &
BIT(phl_rx->r.mdata.frame_type)))) {
/* 2. add to frame list */
sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
if (0 == sts_entry->frames.cnt) {
PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
"MPDU is not the start of PPDU, but the queue is empty!!!\n");
}
pq_push(d, &(sts_entry->frames), &phl_rx->list,
_tail, _bh);
ret = true;
}
}
return ret;
}
#endif
#ifdef CONFIG_PHY_INFO_NTFY
void _phl_rx_post_proc_ppdu_sts(void* priv, struct phl_msg* msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
if (msg->inbuf && msg->inlen){
_os_kmem_free(phl_to_drvpriv(phl_info), msg->inbuf, msg->inlen);
}
}
bool
_phl_rx_proc_aggr_psts_ntfy(struct phl_info_t *phl_info,
struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent)
{
struct rtw_phl_ppdu_sts_info *ppdu_info =
&phl_info->phl_com->ppdu_sts_info;
struct rtw_phl_ppdu_sts_ntfy *psts_ntfy = NULL;
u8 i = 0;
bool ret = false;
if (ppdu_info->msg_aggr_cnt == 0) {
/* reset entry valid status */
for (i = 0; i < MAX_PSTS_MSG_AGGR_NUM; i++) {
ppdu_info->msg_aggr_buf[i].vld = false;
}
}
/* copy to the buf */
psts_ntfy = &ppdu_info->msg_aggr_buf[ppdu_info->msg_aggr_cnt];
psts_ntfy->frame_type = ppdu_sts_ent->frame_type;
_os_mem_cpy(phl_info->phl_com->drv_priv,
&psts_ntfy->phy_info,
&ppdu_sts_ent->phy_info,
sizeof(struct rtw_phl_ppdu_phy_info));
_os_mem_cpy(phl_info->phl_com->drv_priv,
psts_ntfy->src_mac_addr,
ppdu_sts_ent->src_mac_addr,
MAC_ADDRESS_LENGTH);
psts_ntfy->vld = true;
/* update counter */
ppdu_info->msg_aggr_cnt++;
if (ppdu_info->msg_aggr_cnt >= MAX_PSTS_MSG_AGGR_NUM) {
ppdu_info->msg_aggr_cnt = 0;
ret = true;
}
return ret;
}
#endif
void
phl_rx_proc_ppdu_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *phl_rx)
{
u8 i = 0;
struct rtw_phl_ppdu_sts_info *ppdu_info = NULL;
struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent = NULL;
struct rtw_phl_stainfo_t *psta = NULL;
#ifdef CONFIG_PHY_INFO_NTFY
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
struct rtw_phl_ppdu_sts_ntfy *psts_ntfy;
void *d = phl_to_drvpriv(phl_info);
#endif
enum phl_band_idx band = HW_BAND_0;
struct rtw_rssi_info *rssi_sts;
if ((NULL == phl_info) || (NULL == phl_rx))
return;
band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
ppdu_info = &phl_info->phl_com->ppdu_sts_info;
ppdu_sts_ent = &ppdu_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
if (false == ppdu_sts_ent->valid)
return;
if (true == ppdu_sts_ent->phl_done)
return;
ppdu_sts_ent->phl_done = true;
/* update phl self variables */
for(i = 0 ; i < ppdu_sts_ent->usr_num; i++) {
if (ppdu_sts_ent->sta[i].vld) {
psta = rtw_phl_get_stainfo_by_macid(phl_info,
ppdu_sts_ent->sta[i].macid);
if (psta == NULL)
continue;
rssi_sts = &psta->hal_sta->rssi_stat;
STA_UPDATE_MA_RSSI_FAST(rssi_sts->ma_rssi, ppdu_sts_ent->phy_info.rssi);
/* update (re)associate req/resp pkt rssi */
if (RTW_IS_ASOC_PKT(ppdu_sts_ent->frame_type)) {
rssi_sts->assoc_rssi =
ppdu_sts_ent->phy_info.rssi;
}
if (RTW_IS_BEACON_OR_PROBE_RESP_PKT(
ppdu_sts_ent->frame_type)) {
if (0 == rssi_sts->ma_rssi_mgnt) {
rssi_sts->ma_rssi_mgnt =
ppdu_sts_ent->phy_info.rssi;
} else {
STA_UPDATE_MA_RSSI_FAST(
rssi_sts->ma_rssi_mgnt,
ppdu_sts_ent->phy_info.rssi);
}
}
}
else {
if (RTW_IS_ASOC_REQ_PKT(ppdu_sts_ent->frame_type) &&
(ppdu_sts_ent->usr_num == 1)) {
psta = rtw_phl_get_stainfo_by_addr_ex(phl_info,
ppdu_sts_ent->src_mac_addr);
if (psta) {
psta->hal_sta->rssi_stat.assoc_rssi =
ppdu_sts_ent->phy_info.rssi;
#ifdef DBG_AP_CLIENT_ASSOC_RSSI
PHL_INFO("%s [Rx-ASOC_REQ] - macid:%d, MAC-Addr:%02x-%02x-%02x-%02x-%02x-%02x, assoc_rssi:%d\n",
__func__,
psta->macid,
ppdu_sts_ent->src_mac_addr[0],
ppdu_sts_ent->src_mac_addr[1],
ppdu_sts_ent->src_mac_addr[2],
ppdu_sts_ent->src_mac_addr[3],
ppdu_sts_ent->src_mac_addr[4],
ppdu_sts_ent->src_mac_addr[5],
psta->hal_sta->rssi_stat.assoc_rssi);
#endif
}
}
}
}
#ifdef CONFIG_PHY_INFO_NTFY
/*2. prepare and send psts notify to core */
if((RTW_FRAME_TYPE_BEACON == ppdu_sts_ent->frame_type) ||
(RTW_FRAME_TYPE_PROBE_RESP == ppdu_sts_ent->frame_type)) {
if (false == _phl_rx_proc_aggr_psts_ntfy(phl_info,
ppdu_sts_ent)) {
return;
}
/* send aggr psts ntfy*/
psts_ntfy = (struct rtw_phl_ppdu_sts_ntfy *)_os_kmem_alloc(d,
MAX_PSTS_MSG_AGGR_NUM * sizeof(struct rtw_phl_ppdu_sts_ntfy));
if (psts_ntfy == NULL) {
PHL_ERR("%s: alloc ppdu sts for ntfy fail.\n", __func__);
return;
}
_os_mem_cpy(phl_info->phl_com->drv_priv,
psts_ntfy,
&ppdu_info->msg_aggr_buf,
(MAX_PSTS_MSG_AGGR_NUM *
sizeof(struct rtw_phl_ppdu_sts_ntfy)));
msg.inbuf = (u8 *)psts_ntfy;
msg.inlen = (MAX_PSTS_MSG_AGGR_NUM *
sizeof(struct rtw_phl_ppdu_sts_ntfy));
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PSTS);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_RX_PSTS);
attr.completion.completion = _phl_rx_post_proc_ppdu_sts;
attr.completion.priv = phl_info;
if (phl_msg_hub_send(phl_info, &attr, &msg) != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s: send msg_hub failed\n", __func__);
_os_kmem_free(d, psts_ntfy,
(MAX_PSTS_MSG_AGGR_NUM *
sizeof(struct rtw_phl_ppdu_sts_ntfy)));
}
}
#endif
}
static void _dump_rx_reorder_info(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta)
{
void *drv_priv = phl_to_drvpriv(phl_info);
_os_spinlockfg sp_flags;
u8 i;
PHL_INFO("dump rx reorder buffer info:\n");
for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
_os_spinlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
if (sta->tid_rx[i]) {
PHL_INFO("== tid = %d ==\n", sta->tid_rx[i]->tid);
PHL_INFO("head_seq_num = %d\n",
sta->tid_rx[i]->head_seq_num);
PHL_INFO("stored_mpdu_num = %d\n",
sta->tid_rx[i]->stored_mpdu_num);
PHL_INFO("ssn = %d\n", sta->tid_rx[i]->ssn);
PHL_INFO("buf_size = %d\n", sta->tid_rx[i]->buf_size);
PHL_INFO("started = %d\n", sta->tid_rx[i]->started);
PHL_INFO("removed = %d\n", sta->tid_rx[i]->removed);
}
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
}
}
void phl_dump_all_sta_rx_info(struct phl_info_t *phl_info)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_phl_stainfo_t *sta = NULL;
struct rtw_wifi_role_t *role = NULL;
void *drv = phl_to_drvpriv(phl_info);
struct phl_queue *sta_queue;
_os_spinlockfg sp_flags;
u8 i;
PHL_INFO("dump all sta rx info:\n");
for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
role = &phl_com->wifi_roles[i];
if (role->active) {
PHL_INFO("wrole idx = %d\n", i);
PHL_INFO("wrole type = %d\n", role->type);
PHL_INFO("wrole mstate = %d\n", role->mstate);
sta_queue = &role->assoc_sta_queue;
_os_spinlock(drv, &sta_queue->lock, _irq, &sp_flags);
phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
&sta_queue->queue, list) {
PHL_INFO("%s MACID:%d %02x:%02x:%02x:%02x:%02x:%02x \n",
__func__, sta->macid,
sta->mac_addr[0],
sta->mac_addr[1],
sta->mac_addr[2],
sta->mac_addr[3],
sta->mac_addr[4],
sta->mac_addr[5]);
_dump_rx_reorder_info(phl_info, sta);
}
_os_spinunlock(drv, &sta_queue->lock, _irq, &sp_flags);
}
}
}
void phl_rx_dbg_dump(struct phl_info_t *phl_info, u8 band_idx)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
phl_status = phl_cmd_enqueue(phl_info,
band_idx,
MSG_EVT_DBG_RX_DUMP,
NULL,
0,
NULL,
PHL_CMD_NO_WAIT,
0);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "%s: cmd enqueue fail!\n",
__func__);
}
}
|
2301_81045437/rtl8852be
|
phl/phl_rx.c
|
C
|
agpl-3.0
| 47,961
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_RX_H_
#define _PHL_RX_H_
struct phl_rx_pkt_pool {
struct rtw_phl_rx_pkt phl_rx[MAX_PHL_RING_RX_PKT_NUM];
_os_list idle;
_os_list busy;
_os_lock idle_lock; /* spinlock */
_os_lock busy_lock; /* spinlock */
u32 idle_cnt;
};
enum rtw_phl_status phl_rx_init(struct phl_info_t *phl_info);
void phl_rx_deinit(struct phl_info_t *phl_info);
u8 phl_check_recv_ring_resource(struct phl_info_t *phl_info);
struct rtw_phl_rx_pkt *rtw_phl_query_phl_rx(void *phl);
u8 rtw_phl_is_phl_rx_idle(struct phl_info_t *phl_info);
void phl_release_phl_rx(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx);
enum rtw_phl_status phl_rx_reorder(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx,
_os_list *frames);
void phl_rx_deinit(struct phl_info_t *phl_info);
void phl_recycle_rx_buf(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx);
void phl_event_indicator(void *context);
enum rtw_phl_status rtw_phl_start_rx_process(void *phl);
void _phl_indic_new_rxpkt(struct phl_info_t *phl_info);
void
phl_handle_rx_frame_list(struct phl_info_t *phl_info,
_os_list *frames);
void phl_sta_rx_reorder_timer_expired(void *t);
void phl_rx_traffic_upd(struct rtw_stats *sts);
void phl_rx_watchdog(struct phl_info_t *phl_info);
#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
enum rtw_phl_status
phl_rx_proc_phy_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *ppdu_sts);
bool
phl_rx_proc_wait_phy_sts(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx);
#endif
void phl_rx_proc_ppdu_sts(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx);
void phl_reset_rx_stats(struct rtw_stats *stats);
void phl_dump_all_sta_rx_info(struct phl_info_t *phl_info);
u16 rtw_phl_query_new_rx_num(void *phl);
void phl_rx_dbg_dump(struct phl_info_t *phl_info, u8 band_idx);
#endif /*_PHL_RX_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_rx.h
|
C
|
agpl-3.0
| 2,567
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_RX_AGG_C_
#include "phl_headers.h"
/**
* phl_tid_ampdu_rx_free() - Free the tid ampdu rx entry specified by @r.
*
* The caller has to take care of potential race condition:
* - if it is in @rtw_phl_stainfo_t.tid_rx, should be called with
* @rtw_phl_stainfo_t.tid_rx_lock held
* - if it is not in @rtw_phl_stainfo_t.tid_rx, it can be called freely since
* other part of the code can't see it
*
* @phl_tid_ampdu_rx.removed can be set to make sure that the reorder_timer will
* not be armed again. This is useful when canceling the timer synchronously
* before releasing it.
*
* TODO: On macos, _os_kmem_free() should not be called with lock held since it
* may block.
*/
void phl_tid_ampdu_rx_free(struct phl_tid_ampdu_rx *r)
{
/* ref wil_tid_ampdu_rx_free() and ieee80211_free_tid_rx() */
u16 buf_size;
void *drv_priv;
int i;
struct rtw_phl_rx_pkt *pkt = NULL;
struct phl_hci_trx_ops *hci_trx_ops = NULL;
if (!r)
return;
buf_size = r->buf_size;
drv_priv = r->drv_priv;
hci_trx_ops = r->phl_info->hci_trx_ops;
for (i = 0; i < r->buf_size; i++) {
pkt = r->reorder_buf[i];
if (NULL != pkt)
hci_trx_ops->recycle_rx_pkt(r->phl_info, pkt);
}
_os_kmem_free(drv_priv, r->reorder_time,
buf_size * sizeof(u32));
_os_kmem_free(drv_priv, r->reorder_buf,
buf_size * sizeof(struct rtw_phl_rx_pkt *));
_os_kmem_free(drv_priv, r, sizeof(*r));
}
static inline void
_phl_cancel_rx_reorder_timer(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta)
{
void *drv = phl_to_drvpriv(phl_info);
#ifdef PHL_PLATFORM_WINDOWS
/* Cancel the reorder_timer of the stainfo synchronously.
* Note that on Windows, _os_cancel_timer() does not guarantee that
* after the cancel, no timer callback will ever be called. Therefore
* @comp_sync is used to wait for this residual timer callback on
* Windows. */
_os_event_reset(drv, &sta->comp_sync);
_os_cancel_timer(drv, &sta->reorder_timer);
_os_event_wait(drv, &sta->comp_sync, PHL_REORDER_TIMER_SYNC_TO_MS);
#else /*PHL_PLATFORM_LINUX && PHL_PLATFORM_AP*/
_os_cancel_timer(drv, &sta->reorder_timer);/*or _os_cancel_timer_async*/
#endif
}
void phl_free_rx_reorder(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta)
{
void *drv = phl_to_drvpriv(phl_info);
u8 i = 0;
/* Mark the entries as removed to make sure that the reorder_timer of the
* stainfo will not be set again after the timer is canceled. */
_os_spinlock(drv, &sta->tid_rx_lock, _bh, NULL);
for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++)
if (sta->tid_rx[i])
sta->tid_rx[i]->removed = true;
_os_spinunlock(drv, &sta->tid_rx_lock, _bh, NULL);
_phl_cancel_rx_reorder_timer(phl_info, sta);
/* Free the tid ampdu rx entry */
_os_spinlock(drv, &sta->tid_rx_lock, _bh, NULL);
for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
/* ref wil_disconnect_cid() */
if (!sta->tid_rx[i])
continue;
phl_tid_ampdu_rx_free(sta->tid_rx[i]);
sta->tid_rx[i] = NULL;
}
_os_spinunlock(drv, &sta->tid_rx_lock, _bh, NULL);
}
void rtw_phl_stop_rx_ba_session(void *phl, struct rtw_phl_stainfo_t *sta,
u16 tid)
{
/* ref wmi_evt_delba() and ___ieee80211_stop_rx_ba_session() */
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_tid_ampdu_rx *r;
if (NULL == sta) {
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
"rtw_phl_stop_rx_ba_session: station info is NULL!\n");
return;
}
PHL_INFO("Stop rx BA session for sta=0x%p, tid=%u\n", sta, tid);
rtw_hal_stop_ba_session(phl_info->hal, sta, tid);
if (tid >= ARRAY_SIZE(sta->tid_rx))
return;
_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
if (!sta->tid_rx[tid]) {
PHL_INFO("No active session found for the specified sta tid pair\n");
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
return;
}
r = sta->tid_rx[tid];
sta->tid_rx[tid] = NULL;
r->removed = true;
/* Note that it is safe to free the tid ampdu rx here. If reorder_timer
* callback is invoked, the tid_rx_lock is held and it does not do
* anything to the entry if it is NULL. */
phl_tid_ampdu_rx_free(r);
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
PHL_INFO("Rx BA session for sta=0x%p, tid=%u freed\n", sta, tid);
}
struct phl_tid_ampdu_rx *phl_tid_ampdu_rx_alloc(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta,
u16 timeout, u16 ssn, u16 tid,
u16 buf_size)
{
/* ref wil_tid_ampdu_rx_alloc() */
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_tid_ampdu_rx *r;
/* allocate r */
r = _os_kmem_alloc(drv_priv, sizeof(*r));
if (!r)
return NULL;
_os_mem_set(drv_priv, r, 0, sizeof(*r));
/* allocate reorder_buf */
r->reorder_buf =
_os_kmem_alloc(drv_priv,
buf_size * sizeof(struct rtw_phl_rx_pkt *));
if (!r->reorder_buf) {
_os_kmem_free(drv_priv, r, sizeof(*r));
return NULL;
}
_os_mem_set(drv_priv, r->reorder_buf, 0,
buf_size * sizeof(struct rtw_phl_rx_pkt *));
/* allocate reorder_time */
r->reorder_time =
_os_kmem_alloc(drv_priv, buf_size * sizeof(u32));
if (!r->reorder_time) {
_os_kmem_free(drv_priv, r->reorder_buf,
buf_size * sizeof(struct rtw_phl_rx_pkt *));
_os_kmem_free(drv_priv, r, sizeof(*r));
return NULL;
}
_os_mem_set(drv_priv, r->reorder_time, 0,
buf_size * sizeof(u32));
/* init other fields */
r->sta = sta;
r->ssn = ssn;
r->head_seq_num = ssn;
r->buf_size = buf_size;
r->stored_mpdu_num = 0;
r->tid = tid;
r->started = false;
r->drv_priv = drv_priv;
r->phl_info = phl_info;
return r;
}
enum rtw_phl_status
rtw_phl_start_rx_ba_session(void *phl, struct rtw_phl_stainfo_t *sta,
u8 dialog_token, u16 timeout, u16 start_seq_num,
u16 ba_policy, u16 tid, u16 buf_size)
{
/* ref wil_addba_rx_request() and ___ieee80211_start_rx_ba_session() */
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_hal_status hal_sts = RTW_HAL_STATUS_FAILURE;
void *drv_priv = phl_to_drvpriv(phl_info);
struct phl_tid_ampdu_rx *r;
hal_sts = rtw_hal_start_ba_session(phl_info->hal, sta, dialog_token,
timeout, start_seq_num, ba_policy,
tid, buf_size);
/* TODO: sta status */
/* TODO: check sta capability */
PHL_INFO("Start rx BA session for sta=0x%p, tid=%u, buf_size=%u, timeout=%u\n",
sta, tid, buf_size, timeout);
/* apply policies */
if (ba_policy) {
PHL_ERR("BACK requested unsupported ba_policy == 1\n");
return RTW_PHL_STATUS_FAILURE;
}
/* apply buf_size */
if (buf_size == 0) {
PHL_INFO("Suggest BACK wsize %d\n", PHL_MAX_AGG_WSIZE);
buf_size = PHL_MAX_AGG_WSIZE;
}
/* allocate tid ampdu rx */
r = phl_tid_ampdu_rx_alloc(phl_info, sta, timeout, start_seq_num, tid,
buf_size);
if (!r) {
PHL_ERR("Failed to alloc tid ampdu rx\n");
return RTW_PHL_STATUS_RESOURCE;
}
/* apply */
_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
if (sta->tid_rx[tid])
phl_tid_ampdu_rx_free(sta->tid_rx[tid]);
sta->tid_rx[tid] = r;
_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
return RTW_PHL_STATUS_SUCCESS;
}
|
2301_81045437/rtl8852be
|
phl/phl_rx_agg.c
|
C
|
agpl-3.0
| 8,068
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_RX_AGG_H_
#define _PHL_RX_AGG_H_
/*
* This value is used when removing a @phl_tid_ampdu_rx, as the timeout value
* while waiting for an event passed as an argument to _os_event_wait(). Note that the
* timeout value 0 has a different meaning on Windows and Linux. See the
* implementation of _os_event_wait().
*/
#define PHL_REORDER_TIMER_SYNC_TO_MS 50
void phl_tid_ampdu_rx_free(struct phl_tid_ampdu_rx *r);
void phl_free_rx_reorder(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta);
struct phl_tid_ampdu_rx *phl_tid_ampdu_rx_alloc(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta,
u16 timeout, u16 ssn, u16 tid,
u16 buf_size);
#endif /*_PHL_RX_AGG_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_rx_agg.h
|
C
|
agpl-3.0
| 1,479
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_SCAN_H__
#define __PHL_SCAN_H__
/* Header file for application to invoke scan service */
#define PHL_SSID_LEN 32
struct rtw_phl_ssid {
u32 ssid_len;
u8 ssid[PHL_SSID_LEN];
};
enum phl_scan_mode {
NULL_MODE, /* End of Scanning */
NORMAL_SCAN_MODE, /* OFF CHANNEL : non-operation channel*/
BACKOP_MODE,
P2P_SCAN_MODE,
P2P_LISTEN_MODE,
MAX_MODE,
};
/* Scan type including active and passive scan. */
enum rtw_phl_scan_type {
RTW_PHL_SCAN_PASSIVE,
RTW_PHL_SCAN_ACTIVE,
RTW_PHL_SCAN_MIX,
};
enum phl_ext_act_scan_state {
EXT_ACT_SCAN_DISABLE,
EXT_ACT_SCAN_ENABLE,
EXT_ACT_SCAN_TRIGGER,
EXT_ACT_SCAN_DONE,
};
struct phl_scan_channel {
_os_list list;
enum band_type band;
u16 channel; /* channel number */
u16 duration; /* 0: use default, otherwise: channel scan time */
enum channel_width bw; /* 0: use default 20Mhz */
enum chan_offset offset;
enum rtw_phl_scan_type type; /* active scan: 1, passive scan: 0 */
u8 scan_mode; /* according to phl_scan_mode */
u8 ext_act_scan; /* according to phl_ext_act_scan_state */
};
enum scan_result {
SCAN_REQ_ABORT, /* abort a non-started(queued) scan */
SCAN_REQ_CANCEL, /* cancel a started scan */
SCAN_REQ_COMPLETE /* scan complete */
};
enum scan_bkop_mode {
SCAN_BKOP_NONE,
SCAN_BKOP_CNT,
SCAN_BKOP_TIMER
};
#define SCAN_SSID_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
#define SCAN_CH_AMOUNT (14+37)
struct rtw_phl_scan_param {
u32 max_scan_time;
#ifdef CONFIG_PHL_CMD_SCAN_BKOP_TIME
u32 max_listen_time;
#endif
struct rtw_phl_ssid ssid[SCAN_SSID_AMOUNT];
u8 ssid_num;
u8 repeat; /* 255 means scan forever until cancel */
u8 ch_num;
struct phl_scan_channel *ch;
u32 ch_sz;
int ch_idx; /* current scanned channel index, init value = (-1) */
/* back op param */
enum scan_bkop_mode back_op_mode;
u8 back_op_ch_cnt; /* go back to the op ch every back_op_ch_cnt scanned channels */
/* go back to the op ch every 'back_op_off_ch_dur_ms'
* back_op_off_ch_dur_ms has to be larger than ch->duration
* 0 : not specified
*/
u16 back_op_ch_dur_ms; /* op ch stay time; 0 : use default value */
u16 back_op_off_ch_dur_ms;
u16 back_op_off_ch_ext_dur_ms; /* extend when off_ch_tx (MGNT_TX) */
struct rtw_phl_scan_ops *ops;
void *priv; /* ops private */
struct rtw_wifi_role_t *wifi_role;
/* scan fsm internal use
* represent current scanning channel
*/
struct phl_scan_channel *scan_ch;
#ifdef CONFIG_PHL_CMD_SCAN
struct phl_queue chlist;
struct phl_scan_channel back_op_ch[MAX_WIFI_ROLE_NUMBER];
_os_timer scan_timer; /* handle in phl_cmd_scan */
u8 fltr_mode; /* backup filter mode before off channel */
u8 state;
#endif
/* create for computing scan time */
u32 enqueue_time;
u32 start_time;
u32 end_time;
u32 total_scan_time;
u32 token; /* unique id, generated by rtw_phl_scan_request() */
const char *name;
enum scan_result result;
u32 ext_act_scan_period;
};
enum PRECEDE {
TO_TAIL, /* normal case */
TO_HEAD, /* insert to head */
IMMEDIATE /* cancel previous scan and run immediately */
};
struct rtw_phl_scan_ops {
int (*scan_issue_pbreq)(void *priv, struct rtw_phl_scan_param *param);
u8 (*scan_issue_null_data)(void *priv, u8 ridx, bool ps);
int (*scan_start)(void *priv, struct rtw_phl_scan_param *param);
int (*scan_ch_ready)(void *priv, struct rtw_phl_scan_param *param);
int (*scan_off_ch_tx)(void *priv,
struct rtw_phl_scan_param *param, void *data);
int (*scan_complete)(void *priv, struct rtw_phl_scan_param *param);
};
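/*
* Minimal usage sketch (illustrative only; the callback table, channel
* list and priv pointer below are caller-provided assumptions, not
* defined in this header):
*
* struct rtw_phl_scan_param param = {0};
* param.ops = &core_scan_ops; // hypothetical core callbacks
* param.priv = core_priv; // hypothetical ops private data
* param.wifi_role = wrole;
* param.ch = ch_list; // caller-allocated channel array
* param.ch_num = ch_cnt;
* param.ch_idx = -1; // init value, see the field comment above
* rtw_phl_scan_request(phl, &param, TO_TAIL);
*/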
enum rtw_phl_status rtw_phl_scan_cancel(void *phl);
enum rtw_phl_status rtw_phl_scan_request(void *phl,
struct rtw_phl_scan_param *pbuf, enum PRECEDE order);
enum rtw_phl_status rtw_phl_scan_del_request(void *phl,
u32 token);
enum rtw_phl_status rtw_phl_scan_pause(void *phl);
enum rtw_phl_status rtw_phl_scan_resume(void *phl);
enum rtw_phl_status rtw_phl_scan_force_active_scan(void *phl,
u16 channel, u16 duration);
int rtw_phl_scan_inprogress_req_num(void *phl);
enum rtw_phl_status rtw_phl_scan_running_req(void *phl,
struct rtw_phl_scan_param **param);
bool rtw_phl_scan_scanning_req(void *phl, u32 token, u32 *token_running);
enum rtw_phl_status rtw_phl_scan_off_ch_tx(void *phl, void *data, int len);
/* CMD_DISP SCAN*/
enum rtw_phl_status rtw_phl_cmd_scan_request(void *phl,
struct rtw_phl_scan_param *param, enum PRECEDE order);
enum rtw_phl_status rtw_phl_cmd_scan_cancel(void *phl,
struct rtw_phl_scan_param *param);
int rtw_phl_cmd_scan_inprogress(void *phl, u8 band_idx);
#endif /* __PHL_SCAN_H__ */
|
2301_81045437/rtl8852be
|
phl/phl_scan.h
|
C
|
agpl-3.0
| 5,129
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* Author: vincent_fann@realtek.com
*
*****************************************************************************/
#include "phl_headers.h"
#ifdef CONFIG_FSM
#include "phl_scan.h"
#ifdef FSM_DBG_MEM_OVERWRITE
#define _os_kmem_alloc(a, b) fsm_kmalloc(b)
#define _os_kmem_free(a, b, c) fsm_kfree(b, c)
#endif
#define SCAN_PROBE_TIMES 3 /* issue max 3 probe_req per channel */
#define SCAN_PROBE_INTERVAL 10 /* probe_req interval 10ms */
#define SCAN_NULL_PKT_TIME 50 /* 50ms */
#define ALL_TOKEN 0
#define MAX_POWER_ON_TIME 500
#define BACK_OP_CH_DUR_MS 100
#define ALM_MAX_SCAN_TIME 1
#define ALM_PROBE_REQ 2
#define ALM_NEXT_CH 3
#define ALM_BKOP_OFF_CH 4
#ifndef pstr
#define pstr(s) (s + _os_strlen((u8 *)s))
#endif
#ifndef lstr
#define lstr(s, l) (size_t)(l - _os_strlen((u8 *)s))
#endif
enum SCAN_EV_ID {
SCAN_EV_START,
SCAN_EV_REQ_PWR_OK,
SCAN_EV_REQ_PWR_FAIL,
SCAN_EV_REQ_PWR_TIMEOUT,
SCAN_EV_NEXT_CH,
SCAN_EV_NEXT_PROBE_REQ,
SCAN_EV_PAUSE,
SCAN_EV_RESUME,
SCAN_EV_FORCE_ACTIVE,
SCAN_EV_NOTIFY_PENDING_SCAN_REQ,
SCAN_EV_PS_ANN_DONE,
SCAN_EV_BKOP_OFF_CH_EXPIRE,
SCAN_EV_BKOP_ON_CH_EXPIRE,
SCAN_EV_NOTIFY_OFF_CH_CMD,
SCAN_EV_MAX
};
enum SCAN_STATE_ST {
SCAN_ST_IDLE,
SCAN_ST_REQ_PWR,
SCAN_ST_OFF_CH,
SCAN_ST_BACK_OP
};
static int scan_idle_st_hdl(void *obj, u16 event, void *param);
static int scan_req_pwr_st_hdl(void *obj, u16 event, void *param);
static int scan_off_ch_st_hdl(void *obj, u16 event, void *param);
static int scan_back_op_st_hdl(void *obj, u16 event, void *param);
/* STATE table */
static struct fsm_state_ent scan_state_tbl[] = {
ST_ENT(SCAN_ST_IDLE, scan_idle_st_hdl),
ST_ENT(SCAN_ST_REQ_PWR, scan_req_pwr_st_hdl),
ST_ENT(SCAN_ST_OFF_CH, scan_off_ch_st_hdl),
ST_ENT(SCAN_ST_BACK_OP, scan_back_op_st_hdl)
};
/* EVENT table */
static struct fsm_event_ent scan_event_tbl[] = {
EV_ENT(SCAN_EV_START),
EV_ENT(SCAN_EV_REQ_PWR_OK),
EV_ENT(SCAN_EV_REQ_PWR_FAIL),
EV_ENT(SCAN_EV_REQ_PWR_TIMEOUT),
EV_ENT(SCAN_EV_NEXT_CH),
EV_ENT(SCAN_EV_NEXT_PROBE_REQ),
EV_ENT(SCAN_EV_PAUSE),
EV_ENT(SCAN_EV_RESUME),
EV_ENT(SCAN_EV_FORCE_ACTIVE),
EV_ENT(SCAN_EV_NOTIFY_PENDING_SCAN_REQ),
EV_ENT(SCAN_EV_PS_ANN_DONE),
EV_ENT(SCAN_EV_BKOP_OFF_CH_EXPIRE),
EV_ENT(SCAN_EV_BKOP_ON_CH_EXPIRE),
EV_ENT(SCAN_EV_NOTIFY_OFF_CH_CMD),
EV_ENT(SCAN_EV_MAX)
};
struct scan_obj {
struct fsm_main *fsm;
struct fsm_obj *fsm_obj;
struct rtw_phl_scan_param *param;
struct phl_info_t *phl_info;
struct phl_queue req_q;
struct phl_queue off_ch_cmd_q;
u32 token; /* globally increased when a new scan request comes in */
u32 token_running; /* token of running scan */
u8 fltr_mode; /* backup filter mode before off channel */
u8 candidate; /* Guarantee we have one candidate */
u8 probe_cnts;
u8 off_ch_step;
/* back op */
bool back_op_is_required;
struct phl_scan_channel back_op_ch;
struct rtw_phl_scan_ops *ops;
};
/*
* SCAN state sub function
*/
#define OFF_CH_STEP_CLEAR 0x00
#define OFF_CH_SET_FILTER 0x01
#define OFF_CH_PAUSE_TX 0x02
void off_ch_set_step(struct scan_obj *pscan, u8 step)
{
pscan->off_ch_step |= step;
}
void off_ch_clr_step(struct scan_obj *pscan, u8 step)
{
pscan->off_ch_step &= ~step;
}
bool off_ch_chk_step(struct scan_obj *pscan, u8 step)
{
if (pscan->off_ch_step & step)
return true;
return false;
}
/*
* stop netif queue
* notify the AP about us leaving the channel and stop all STA interfaces.
* Stop queues and transmit all frames queued by the driver before
* sending nullfunc to enable powersave at the AP.
*/
static int drv_offchannel_stop_vifs(struct scan_obj *pscan)
{
// reference ieee80211_offchannel_stop_vifs();
/* TODO stop upper netif ac queues */
/* stop lower macid */
return 0;
}
/* return to OP channel */
static int off_ch_return_set_ch_bw(struct scan_obj *pscan)
{
enum rtw_phl_status phl_sts = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = pscan->phl_info;
struct rtw_wifi_role_t *wrole = pscan->param->wifi_role;
struct rtw_chan_def chandef = {0};
if (wrole == NULL)
return -1;
phl_sts = phl_mr_get_chandef(phl_info, wrole, false, &chandef);
if (phl_sts != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s phl_mr_get_chandef failed\n", __func__);
return -2;
}
PHL_DUMP_CHAN_DEF_EX(&chandef);
phl_set_ch_bw(wrole, &chandef, false);
FSM_INFO(pscan->fsm, "%s %s() channel=%d, bw=%d, offest=%d\n",
phl_fsm_obj_name(pscan->fsm_obj), __func__,
chandef.chan, chandef.bw, chandef.offset);
return 0;
}
#ifdef CONFIG_RTW_ACS
static void scan_acs_mntr_trigger(struct scan_obj *pscan,
struct phl_scan_channel *scan_ch)
{
u16 monitor_time = scan_ch->duration - MONITOR_TIME_TOLERANCE;
phl_acs_mntr_trigger(pscan->phl_info,
(u8)pscan->param->ch_idx, scan_ch->channel, monitor_time);
}
static void scan_acs_mntr_result(struct scan_obj *pscan)
{
if (pscan->param->ch_idx < 0)
return;
phl_acs_mntr_result(pscan->phl_info);
}
#endif /* CONFIG_RTW_ACS */
/* inform CORE and HAL that SCAN is complete */
static int scan_complete(struct scan_obj *pscan)
{
struct phl_info_t *phl_info = (struct phl_info_t *)pscan->phl_info;
struct rtw_phl_scan_param *param = pscan->param;
u8 band_idx = 0;
enum phl_phy_idx phy_idx = HW_PHY_0;
if (param->ch_num == (pscan->param->ch_idx + 1))
param->result = SCAN_REQ_COMPLETE;
param->end_time = _os_get_cur_time_ms();
/* dump scan time */
param->total_scan_time =
phl_fsm_time_pass(param->enqueue_time);
FSM_INFO(pscan->fsm, "%s takes %d ms to scan %d/%d channels\n",
phl_fsm_obj_name(pscan->fsm_obj),
param->total_scan_time, param->ch_idx + 1, param->ch_num);
if (pscan->param->wifi_role)
band_idx = pscan->param->wifi_role->hw_band;
else
band_idx = 0;
if (band_idx == HW_BAND_1)
phy_idx = HW_PHY_1;
rtw_hal_btc_scan_finish_ntfy(phl_info->hal, phy_idx);
rtw_hal_notification(phl_info->hal, MSG_EVT_SCAN_END, band_idx);
phl_p2pps_noa_resume_all(phl_info, param->wifi_role);
#ifdef CONFIG_MCC_SUPPORT
if (phl_mr_coex_handle(phl_info, param->wifi_role, 0,
param->wifi_role->hw_band, MR_COEX_TRIG_BY_SCAN)
!= RTW_PHL_STATUS_SUCCESS) {
FSM_ERR(pscan->fsm,
"%s enable MCC failed\n",
phl_fsm_obj_name(pscan->fsm_obj));
return -1;
}
#endif /* CONFIG_MCC_SUPPORT */
if (pscan->ops && pscan->ops->scan_complete)
pscan->ops->scan_complete(pscan->param->priv, pscan->param);
return 0;
}
static void scan_set_timer(struct scan_obj *pscan, u32 duration, u16 event)
{
FSM_DBG(pscan->fsm, "%s %s() duration=%d\n",
phl_fsm_obj_name(pscan->fsm_obj), __func__, duration);
phl_fsm_set_alarm(pscan->fsm_obj, duration, event);
}
/* Announce null data if needed
 * @pscan: scan obj
 * @off_ch:
 * true: go off channel (issue null-1)
 * false: back to OP channel (issue null-0)
 * need_wait:
 * false: issuing a null packet is not necessary
 * true: null packet issued (caller has to wait for null data transmission)
 *
 * return: 0 success, negative value: fail
*/
static int scan_off_chan(struct scan_obj *pscan, bool off_ch, bool *need_wait)
{
struct rtw_phl_com_t *phl_com = pscan->phl_info->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct rtw_wifi_role_t *wrole = pscan->param->wifi_role;
struct hw_band_ctl_t *band_ctl = &(mr_ctl->band_ctrl[wrole->hw_band]);
enum rtw_phl_status psts = RTW_PHL_STATUS_SUCCESS;
*need_wait = false;
psts = phl_mr_offch_hdl(pscan->phl_info, pscan->param->wifi_role,
off_ch, pscan->param->priv, pscan->ops->scan_issue_null_data);
/* callback failed */
if (psts != RTW_PHL_STATUS_SUCCESS)
return -1;
if ((band_ctl->cur_info.ld_sta_num) > 0)
*need_wait = false;
/* ap is currently operating */
if ((band_ctl->cur_info.ap_num) > 0)
pscan->back_op_is_required = true;
if ((band_ctl->cur_info.ld_sta_num + band_ctl->cur_info.ld_ap_num) > 0)
pscan->back_op_is_required = true;
FSM_INFO(pscan->fsm, "%s %s() off_ch=%d, back_op_is_required=%d\n",
phl_fsm_obj_name(pscan->fsm_obj), __func__,
off_ch, pscan->back_op_is_required);
return 0;
}
/* Move channel index and decide scan channel */
static struct phl_scan_channel *scan_select_channel(
struct scan_obj *pscan)
{
struct rtw_phl_scan_param *p = pscan->param;
if (p->ch_idx == -1)
goto next_ch;
/* back_op according to op_ch_count */
if (p->back_op_mode == SCAN_BKOP_CNT &&
pscan->back_op_is_required && p->scan_ch &&
(p->scan_ch->scan_mode != BACKOP_MODE) &&
((p->ch_idx + 1) % p->back_op_ch_cnt == 0)) {
/* goto back op channel */
p->scan_ch = &pscan->back_op_ch;
goto done; /* do back_op */
} else if (p->back_op_mode == SCAN_BKOP_TIMER && p->scan_ch &&
(p->scan_ch->scan_mode == BACKOP_MODE)) {
/* we just returned from op ch;
* stay at the same channel for now
*/
p->scan_ch = &p->ch[p->ch_idx];
goto done;
}
next_ch:
p->ch_idx++;
if (p->ch_idx == p->ch_num) {
/* no more channel for now */
if (p->repeat > 0) {
/* go back to the first channel */
p->ch_idx = 0;
FSM_INFO(pscan->fsm, "%s repeat=%d\n",
phl_fsm_obj_name(pscan->fsm_obj), p->repeat);
/* 255 means loop forever */
if (p->repeat != 255)
p->repeat--;
} else {
p->ch_idx--;
return NULL; /* we are done */
}
}
p->scan_ch = &p->ch[p->ch_idx];
if (p->scan_ch->scan_mode == BACKOP_MODE &&
pscan->back_op_is_required == false)
goto next_ch;
done:
FSM_INFO(pscan->fsm, "%s ch_num = %d/%d, ch = %d\n",
phl_fsm_obj_name(pscan->fsm_obj), (p->ch_idx + 1),
p->ch_num, p->scan_ch->channel);
return p->scan_ch;
}
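/*
 * Illustrative example (not from the original source): with
 * back_op_mode == SCAN_BKOP_CNT, back_op_ch_cnt == 3 and an 8-entry
 * channel list, the selection above yields roughly
 * ch[0] ch[1] ch[2] OP ch[3] ch[4] ch[5] OP ch[6] ch[7]
 * i.e. the FSM returns to the operating channel after every
 * back_op_ch_cnt off-channel dwells, but only while back_op_is_required
 * is set.
 */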
/* Switch channel */
static void scan_set_channel_bw(struct scan_obj *pscan, u16 ch,
enum channel_width bw, enum chan_offset offset, u8 phy_idx)
{
struct rtw_chan_def chdef = {0};
FSM_INFO(pscan->fsm,
"%s %s() ch=%d, bw=%d, offest=%d, duration=%d\n",
phl_fsm_obj_name(pscan->fsm_obj), __func__,
ch, bw, offset, pscan->param->scan_ch->duration);
chdef.chan = (u8)ch;
chdef.bw = bw;
chdef.offset = offset;
phl_set_ch_bw(pscan->param->wifi_role, &chdef, false);
if (pscan->ops->scan_ch_ready)
pscan->ops->scan_ch_ready(pscan->param->priv, pscan->param);
}
/* call back to the caller to issue a probe request */
static void scan_issue_probereq(struct scan_obj *pscan)
{
if (pscan->param->scan_ch->type == RTW_PHL_SCAN_PASSIVE)
return;
if (pscan->ops->scan_issue_pbreq)
pscan->ops->scan_issue_pbreq(pscan->param->priv, pscan->param);
}
/* flush NIC TX FIFO */
static int scan_flush_tx_queue(struct scan_obj *pscan)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
hal_status = rtw_hal_scan_flush_queue(pscan->phl_info->hal,
pscan->param->wifi_role);
if (hal_status != RTW_HAL_STATUS_SUCCESS)
FSM_WARN(pscan->fsm, "%s flush tx queue fail\n",
phl_fsm_obj_name(pscan->fsm_obj));
return hal_status;
}
/* Pause NIC TX FIFO */
static int scan_pause_tx(struct scan_obj *pscan)
{
struct phl_info_t *phl_info = (struct phl_info_t *)pscan->phl_info;
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
bool off_ch = true;
/* going off channel - pause TX */
hal_status = rtw_hal_scan_pause_tx_fifo(phl_info->hal,
pscan->param->wifi_role->hw_band, off_ch);
if (hal_status == RTW_HAL_STATUS_SUCCESS)
off_ch_set_step(pscan, OFF_CH_PAUSE_TX);
else
FSM_WARN(pscan->fsm, "%s pause tx fifo fail\n",
phl_fsm_obj_name(pscan->fsm_obj));
return hal_status;
}
static int scan_resume_tx(struct scan_obj *pscan)
{
struct phl_info_t *phl_info = (struct phl_info_t *)pscan->phl_info;
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
bool off_ch = false;
/* returning to op channel - resume TX */
/* Do not resume TX that was never paused */
if (!off_ch_chk_step(pscan, OFF_CH_PAUSE_TX))
return RTW_HAL_STATUS_SUCCESS;
hal_status = rtw_hal_scan_pause_tx_fifo(phl_info->hal,
pscan->param->wifi_role->hw_band, off_ch);
off_ch_clr_step(pscan, OFF_CH_PAUSE_TX);
if (hal_status != RTW_HAL_STATUS_SUCCESS)
FSM_WARN(pscan->fsm, "%s resume tx fifo fail\n",
phl_fsm_obj_name(pscan->fsm_obj));
return hal_status;
}
static int scan_set_filter(struct scan_obj *pscan)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
bool off_ch = true;
hal_status = rtw_hal_scan_set_rxfltr_by_mode(pscan->phl_info->hal,
pscan->param->wifi_role->hw_band,
off_ch, &pscan->fltr_mode);
if (hal_status == RTW_HAL_STATUS_SUCCESS)
off_ch_set_step(pscan, OFF_CH_SET_FILTER);
return hal_status;
}
static int scan_clr_filter(struct scan_obj *pscan)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
bool off_ch = false;
/* Do not clear a filter that was not set */
if (!off_ch_chk_step(pscan, OFF_CH_SET_FILTER))
return hal_status;
hal_status = rtw_hal_scan_set_rxfltr_by_mode(pscan->phl_info->hal,
pscan->param->wifi_role->hw_band,
off_ch, &pscan->fltr_mode);
off_ch_clr_step(pscan, OFF_CH_SET_FILTER);
if (hal_status != RTW_HAL_STATUS_SUCCESS)
FSM_WARN(pscan->fsm, "%s clear filter fail\n",
phl_fsm_obj_name(pscan->fsm_obj));
return hal_status;
}
/* inform CORE and HAL SCAN is starting */
static int scan_start(struct scan_obj *pscan)
{
struct phl_info_t *phl_info = (struct phl_info_t *)pscan->phl_info;
struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
struct rtw_phl_scan_param *param = pscan->param;
u32 pass_time, remain_time, end_time;
enum band_type band = BAND_ON_5G;
u8 band_idx = 0;
enum phl_phy_idx phy_idx = HW_PHY_0;
FSM_INFO(pscan->fsm, "%s %s()\n",
phl_fsm_obj_name(pscan->fsm_obj), __func__);
#ifdef CONFIG_MCC_SUPPORT
if (phl_mr_coex_disable(phl_info, param->wifi_role,
param->wifi_role->hw_band, MR_COEX_TRIG_BY_SCAN)
!= RTW_PHL_STATUS_SUCCESS) {
FSM_ERR(pscan->fsm,
"%s disable MCC failed\n",
phl_fsm_obj_name(pscan->fsm_obj));
return -1;
}
#endif /* CONFIG_MCC_SUPPORT */
phl_p2pps_noa_pause_all(phl_info, param->wifi_role);
param->start_time = _os_get_cur_time_ms();
/* check max scan time */
if (param->max_scan_time > 0) {
end_time = param->enqueue_time + param->max_scan_time;
pass_time = phl_fsm_time_pass(param->enqueue_time);
if (pass_time >= param->max_scan_time) {
FSM_ERR(pscan->fsm,
"%s Timeout! pass_time %d > max_time %d\n",
phl_fsm_obj_name(pscan->fsm_obj),
pass_time, param->max_scan_time);
return -1;
}
remain_time = phl_fsm_time_left(param->enqueue_time, end_time);
FSM_INFO(pscan->fsm,
"%s: max_time = %d ms, remain = %d ms\n",
phl_fsm_obj_name(pscan->fsm_obj),
param->max_scan_time, remain_time);
phl_fsm_set_alarm_ext(pscan->fsm_obj, remain_time,
FSM_EV_CANCEL, ALM_MAX_SCAN_TIME, NULL);
}
if (param->wifi_role)
band_idx = param->wifi_role->hw_band;
else
band_idx = 0;
if (band_idx == HW_BAND_1)
phy_idx = HW_PHY_1;
band = hal_com->band[band_idx].cur_chandef.band;
rtw_hal_btc_scan_start_ntfy(phl_info->hal, phy_idx, band);
rtw_hal_notification(phl_info->hal, MSG_EVT_SCAN_START, band_idx);
/* [scan start notify] */
if (pscan->ops->scan_start)
pscan->ops->scan_start(pscan->param->priv, param);
pscan->token_running = param->token;
pscan->param->result = SCAN_REQ_CANCEL;
return 0;
}
static void scan_free_req(struct scan_obj *pscan, struct fsm_msg *msg)
{
void *d = phl_to_drvpriv(pscan->phl_info);
struct rtw_phl_scan_param *scan_param =
(struct rtw_phl_scan_param *)msg->param;
if (msg->param != NULL) {
if (scan_param->ch != NULL)
_os_kmem_free(d, scan_param->ch, scan_param->ch_sz);
_os_kmem_free(d, msg->param, msg->param_sz);
}
_os_kmem_free(d, (void *)msg, sizeof(*msg));
}
static void scan_free_msg(struct scan_obj *pscan, struct fsm_msg *msg)
{
void *d = phl_to_drvpriv(pscan->phl_info);
_os_kmem_free(d, msg->param, msg->param_sz);
_os_kmem_free(d, msg, sizeof(*msg));
}
void scan_ps_cb(void *phl, void *hdl, void *ctx, enum rtw_phl_status stat)
{
struct scan_obj *pscan = (struct scan_obj *)ctx;
if (stat == RTW_PHL_STATUS_SUCCESS)
phl_fsm_gen_msg(phl,
pscan->fsm_obj, NULL, 0, SCAN_EV_REQ_PWR_OK);
else
phl_fsm_gen_msg(phl, pscan->fsm_obj,
NULL, 0, SCAN_EV_REQ_PWR_FAIL);
}
static bool chk_pending_req(struct scan_obj *pscan)
{
void *d = phl_to_drvpriv(pscan->phl_info);
_os_list *obj;
/* Make sure we only have one candidate */
if (pscan->candidate)
return true;
/* Check pending scan request
* Dequeue extra_queue and enqueue back to msg_queue
*/
if (!pq_pop(d, &pscan->req_q, &obj, _first, _bh))
return false;
pscan->candidate = 1;
if (phl_fsm_sent_msg(pscan->fsm_obj, (struct fsm_msg *)obj) !=
RTW_PHL_STATUS_SUCCESS) {
scan_free_req(pscan, (struct fsm_msg *)obj);
return false;
}
return true; /* has pending req */
}
/*
* SCAN state handler
*/
/*
* scan idle handler
*/
static int scan_idle_st_hdl(void *obj, u16 event, void *param)
{
struct scan_obj *pscan = (struct scan_obj *)obj;
struct rtw_phl_scan_param *scan_param;
void *d = phl_to_drvpriv(pscan->phl_info);
int rtn = FSM_FREE_PARAM;
switch (event) {
case FSM_EV_STATE_IN:
phl_fsm_cancel_alarm_ext(pscan->fsm_obj, ALM_MAX_SCAN_TIME);
scan_complete(pscan);
/* clear all used data */
_os_kmem_free(d, pscan->param->ch, pscan->param->ch_sz);
_os_kmem_free(d, pscan->param, sizeof(*pscan->param));
pscan->param = NULL;
pscan->token_running = 0;
pscan->back_op_is_required = false;
pscan->back_op_ch.duration = BACK_OP_CH_DUR_MS;
if (chk_pending_req(pscan))
break;
break;
case SCAN_EV_START:
/* save param from caller */
scan_param = (struct rtw_phl_scan_param *)param;
pscan->param = scan_param;
pscan->ops = scan_param->ops;
if (scan_param->back_op_ch_dur_ms)
pscan->back_op_ch.duration =
scan_param->back_op_ch_dur_ms;
/* Tell FSM framework DON'T free param
* Scan fsm will free it when scan is done
*/
rtn = FSM_KEEP_PARAM;
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_REQ_PWR);
pscan->candidate = 0;
break;
case SCAN_EV_NOTIFY_PENDING_SCAN_REQ:
chk_pending_req(pscan);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pscan->fsm_obj);
break;
default:
break;
}
return rtn;
}
static int scan_req_pwr_st_hdl(void *obj, u16 event, void *param)
{
struct scan_obj *pscan = (struct scan_obj *)obj;
int rtn = FSM_FREE_PARAM;
enum rtw_phl_status phl_st = RTW_PHL_STATUS_SUCCESS;
switch (event) {
case FSM_EV_STATE_IN:
if (phl_st == RTW_PHL_STATUS_PENDING) {
/* we have to wait SCAN_EV_REQ_PWR_OK */
phl_fsm_set_alarm(pscan->fsm_obj,
MAX_POWER_ON_TIME, SCAN_EV_REQ_PWR_TIMEOUT);
break;
}
if (phl_st != RTW_PHL_STATUS_SUCCESS) {
FSM_ERR(pscan->fsm, "%s power on fail(%d)\n",
phl_fsm_obj_name(pscan->fsm_obj), phl_st);
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_IDLE);
break;
}
/* fall through */
case SCAN_EV_REQ_PWR_OK:
if (scan_start(pscan) < 0) {
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_IDLE);
break;
}
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_OFF_CH);
break;
case SCAN_EV_REQ_PWR_FAIL:
case SCAN_EV_REQ_PWR_TIMEOUT:
FSM_ERR(pscan->fsm, "%s power on fail\n",
phl_fsm_obj_name(pscan->fsm_obj));
/* fall through */
case FSM_EV_CANCEL:
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pscan->fsm_obj);
break;
default:
break;
}
return rtn;
}
int handle_off_ch_cmd(struct scan_obj *pscan)
{
struct rtw_phl_scan_param *param = pscan->param;
void *d = phl_to_drvpriv(pscan->phl_info);
int off_ch_tx_num = pscan->off_ch_cmd_q.cnt;
struct fsm_msg *msg;
_os_list *obj;
while (pq_pop(d, &pscan->off_ch_cmd_q, &obj, _first, _bh)) {
msg = (struct fsm_msg *)obj;
if (param && param->ops && param->ops->scan_off_ch_tx)
param->ops->scan_off_ch_tx(param->priv,
param, msg->param);
scan_free_msg(pscan, msg);
}
return off_ch_tx_num;
}
/* switch channel and scan
*/
static int scan_off_ch_st_hdl(void *obj, u16 event, void *param)
{
struct scan_obj *pscan = (struct scan_obj *)obj;
struct rtw_phl_scan_param *p = pscan->param;
struct phl_scan_channel *scan_ch;
bool need_wait;
switch (event) {
case FSM_EV_STATE_IN:
pscan->off_ch_step = OFF_CH_STEP_CLEAR;
if (scan_off_chan(pscan, true, &need_wait) < 0) {
/* issue Null-1 fail */
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_IDLE);
break;
}
if (need_wait == true) {
/* TODO wait tx report */
scan_set_timer(pscan,
SCAN_NULL_PKT_TIME, SCAN_EV_PS_ANN_DONE);
break;
}
/* fall through */
case SCAN_EV_PS_ANN_DONE:
scan_flush_tx_queue(pscan);
scan_pause_tx(pscan);
scan_set_filter(pscan);
/* start back_op_off_ch_dur */
if ((p->back_op_mode == SCAN_BKOP_TIMER) &&
pscan->back_op_is_required)
phl_fsm_set_alarm_ext(pscan->fsm_obj,
p->back_op_off_ch_dur_ms,
SCAN_EV_BKOP_OFF_CH_EXPIRE,
ALM_BKOP_OFF_CH, NULL);
/* dequeue off_ch_cmd */
if (handle_off_ch_cmd(pscan) > 0) {
if (pscan->back_op_is_required == true)
phl_fsm_extend_alarm_ext(pscan->fsm_obj,
pscan->param->back_op_off_ch_ext_dur_ms,
ALM_BKOP_OFF_CH);
else
phl_fsm_extend_alarm_ext(pscan->fsm_obj,
pscan->param->back_op_off_ch_ext_dur_ms,
ALM_NEXT_CH);
}
/* fall through */
case SCAN_EV_NEXT_CH:
phl_fsm_cancel_alarm_ext(pscan->fsm_obj, ALM_PROBE_REQ);
#ifdef CONFIG_RTW_ACS
scan_acs_mntr_result(pscan);
#endif
scan_ch = scan_select_channel(pscan);
if (scan_ch == NULL) {
/* no more channel, we are done */
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_IDLE);
break;
}
if (scan_ch->scan_mode == BACKOP_MODE) {
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_BACK_OP);
break;
}
pscan->probe_cnts = SCAN_PROBE_TIMES;
scan_set_channel_bw(pscan, scan_ch->channel, CHANNEL_WIDTH_20,
CHAN_OFFSET_NO_EXT, 0);
#ifdef CONFIG_RTW_ACS
scan_acs_mntr_trigger(pscan, scan_ch);
#endif
if (phl_fsm_is_alarm_off_ext(
pscan->fsm_obj, ALM_NEXT_CH)) {
phl_fsm_set_alarm_ext(pscan->fsm_obj, scan_ch->duration,
SCAN_EV_NEXT_CH, ALM_NEXT_CH, NULL);
}
/* fall through */
case SCAN_EV_NEXT_PROBE_REQ:
if (pscan->probe_cnts-- == 0)
break;
if (pscan->ops->scan_issue_pbreq == NULL ||
p->scan_ch->type == RTW_PHL_SCAN_PASSIVE)
break;
scan_issue_probereq(pscan);
phl_fsm_set_alarm_ext(pscan->fsm_obj, SCAN_PROBE_INTERVAL,
SCAN_EV_NEXT_PROBE_REQ, ALM_PROBE_REQ, NULL);
break;
case SCAN_EV_BKOP_OFF_CH_EXPIRE:
p->scan_ch = &pscan->back_op_ch;
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_BACK_OP);
break;
case SCAN_EV_FORCE_ACTIVE:
/* Already on an active scan channel? */
if (p->scan_ch->type == RTW_PHL_SCAN_ACTIVE)
break;
scan_ch = (struct phl_scan_channel *)param;
/* requested ch mismatches current scan ch */
if (scan_ch->channel != p->scan_ch->channel)
break;
#ifdef RTW_WKARD_PHL_FSM_SCAN_PASSIVE_TO_ACTIVE
if (p->scan_ch->ext_act_scan == EXT_ACT_SCAN_ENABLE)
p->scan_ch->ext_act_scan = EXT_ACT_SCAN_DONE;
else {
FSM_INFO(pscan->fsm, "ch(%d) scan type not changed, ext_act_scan(%d) \n",
scan_ch->channel, p->scan_ch->ext_act_scan);
break;
}
#endif
FSM_INFO(pscan->fsm, "ch(%d) PASSIVE->ACTIVE!\n", scan_ch->channel);
p->scan_ch->type = RTW_PHL_SCAN_ACTIVE;
scan_issue_probereq(pscan);
phl_fsm_set_alarm_ext(pscan->fsm_obj, SCAN_PROBE_INTERVAL,
SCAN_EV_NEXT_PROBE_REQ, ALM_PROBE_REQ, NULL);
break;
case SCAN_EV_NOTIFY_OFF_CH_CMD:
/* dequeue off_ch_cmd */
if (handle_off_ch_cmd(pscan) > 0) {
if (pscan->back_op_is_required == true)
phl_fsm_extend_alarm_ext(pscan->fsm_obj,
pscan->param->back_op_off_ch_ext_dur_ms,
ALM_BKOP_OFF_CH);
else
phl_fsm_extend_alarm_ext(pscan->fsm_obj,
pscan->param->back_op_off_ch_ext_dur_ms,
ALM_NEXT_CH);
}
break;
case SCAN_EV_PAUSE:
phl_fsm_pause_alarm(pscan->fsm_obj);
phl_fsm_pause_alarm_ext(pscan->fsm_obj, ALM_NEXT_CH);
phl_fsm_pause_alarm_ext(pscan->fsm_obj, ALM_PROBE_REQ);
phl_fsm_pause_alarm_ext(pscan->fsm_obj, ALM_BKOP_OFF_CH);
break;
case SCAN_EV_RESUME:
phl_fsm_resume_alarm(pscan->fsm_obj);
phl_fsm_resume_alarm_ext(pscan->fsm_obj, ALM_NEXT_CH);
phl_fsm_resume_alarm_ext(pscan->fsm_obj, ALM_PROBE_REQ);
phl_fsm_resume_alarm_ext(pscan->fsm_obj, ALM_BKOP_OFF_CH);
break;
case FSM_EV_CANCEL:
phl_fsm_cancel_alarm_ext(pscan->fsm_obj, ALM_NEXT_CH);
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pscan->fsm_obj);
phl_fsm_cancel_alarm_ext(pscan->fsm_obj, ALM_PROBE_REQ);
phl_fsm_cancel_alarm_ext(pscan->fsm_obj, ALM_BKOP_OFF_CH);
scan_clr_filter(pscan);
off_ch_return_set_ch_bw(pscan);
scan_resume_tx(pscan);
scan_off_chan(pscan, false, &need_wait);
if (p->scan_ch->scan_mode != BACKOP_MODE)
phl_fsm_cancel_alarm_ext(pscan->fsm_obj, ALM_NEXT_CH);
break;
default:
break;
}
return 0;
}
/* Stay in OP channel for a while
* OP channel normal data TRx
*/
static int scan_back_op_st_hdl(void *obj, u16 event, void *param)
{
struct scan_obj *pscan = (struct scan_obj *)obj;
u32 start_time = _os_get_cur_time_ms();
switch (event) {
case FSM_EV_STATE_IN:
scan_set_timer(pscan,
pscan->param->scan_ch->duration,
SCAN_EV_BKOP_ON_CH_EXPIRE);
break;
case SCAN_EV_BKOP_ON_CH_EXPIRE:
/* leave back_op channel */
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_OFF_CH);
break;
case SCAN_EV_NEXT_CH:
phl_fsm_set_alarm_ext(pscan->fsm_obj,
phl_fsm_time_pass(start_time) + SCAN_NULL_PKT_TIME,
SCAN_EV_NEXT_CH, ALM_NEXT_CH, NULL);
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_OFF_CH);
break;
case SCAN_EV_NOTIFY_OFF_CH_CMD:
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_OFF_CH);
break;
case SCAN_EV_PAUSE:
phl_fsm_pause_alarm(pscan->fsm_obj);
break;
case SCAN_EV_RESUME:
phl_fsm_resume_alarm(pscan->fsm_obj);
break;
case FSM_EV_CANCEL:
phl_fsm_state_goto(pscan->fsm_obj, SCAN_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pscan->fsm_obj);
break;
default:
break;
}
return 0;
}
static u32 scan_get_token(struct scan_obj *pscan)
{
pscan->token++;
if (pscan->token == ALL_TOKEN)
pscan->token++;
return pscan->token;
}
static void scan_dump_obj(void *obj, char *s, int *sz)
{
/* nothing to do for now */
}
static void scan_dump_fsm(void *fsm, char *s, int *sz)
{
/* nothing to do for now */
}
static void scan_dbg_help(struct scan_obj *pscan, char *p, int *sz)
{
int len = *sz;
_os_snprintf(pstr(p), lstr(p, len),
"usage:\n\t<%s> req del <0|token>\n\t<%s> req <pause|resume>\n",
phl_fsm_obj_name(pscan->fsm_obj),
phl_fsm_obj_name(pscan->fsm_obj));
*sz = len;
}
static void scan_debug(void *obj, char input[][MAX_ARGV], u32 input_num,
char *output, u32 *out_len)
{
struct scan_obj *pscan = (struct scan_obj *)obj;
char *ptr = output;
int len = *out_len;
int token;
if (input_num < 2) {
scan_dbg_help(pscan, ptr, &len);
goto done;
}
if (!_os_strcmp(input[0], "req")) {
if (!_os_strcmp(input[1], "del")) {
/* del scan request */
/* cmd: scan-1 req del <token> */
/* read token */
if (_os_sscanf(input[2], "%d", &token) != 1) {
_os_snprintf(pstr(ptr), lstr(ptr, len),
"%s Err missing token\n",
phl_fsm_obj_name(pscan->fsm_obj));
goto done;
}
_os_snprintf(pstr(ptr), lstr(ptr, len),
"%s del token %d\n",
phl_fsm_obj_name(pscan->fsm_obj), token);
rtw_phl_scan_del_request(pscan->phl_info, token);
} else if (!_os_strcmp(input[1], "pause")) {
/* cmd: scan-1 req pause */
_os_snprintf(pstr(ptr), lstr(ptr, len),
"%s pause\n",
phl_fsm_obj_name(pscan->fsm_obj));
rtw_phl_scan_pause(pscan->phl_info);
} else if (!_os_strcmp(input[1], "resume")) {
/* cmd: scan-1 req resume */
_os_snprintf(pstr(ptr), lstr(ptr, len),
"%s resume\n",
phl_fsm_obj_name(pscan->fsm_obj));
rtw_phl_scan_resume(pscan->phl_info);
}
} else
scan_dbg_help(pscan, ptr, &len);
done:
*out_len = len;
}
/* For EXTERNAL application to create a scan FSM */
/* @root: FSM root structure
* @phl: private data structure to invoke hal/phl function
*
* return
* fsm_main: FSM main structure (Do NOT expose)
*/
struct fsm_main *phl_scan_new_fsm(struct fsm_root *root,
struct phl_info_t *phl_info)
{
void *d = phl_to_drvpriv(phl_info);
struct fsm_main *fsm = NULL;
struct rtw_phl_fsm_tb tb;
_os_mem_set(d, &tb, 0, sizeof(tb));
tb.max_state = sizeof(scan_state_tbl)/sizeof(scan_state_tbl[0]);
tb.max_event = sizeof(scan_event_tbl)/sizeof(scan_event_tbl[0]);
tb.state_tbl = scan_state_tbl;
tb.evt_tbl = scan_event_tbl;
tb.dump_obj = scan_dump_obj;
tb.dump_fsm = scan_dump_fsm;
tb.debug = scan_debug;
tb.dbg_level = FSM_DBG_INFO;
tb.evt_level = FSM_DBG_INFO;
fsm = phl_fsm_init_fsm(root, "scan", phl_info, &tb);
return fsm;
}
/* For EXTERNAL application to destory scan fsm */
/* @fsm: see fsm_main
*/
void phl_scan_destory_fsm(struct fsm_main *fsm)
{
if (fsm == NULL)
return;
/* deinit fsm local variables if any */
/* call FSM framework to deinit fsm */
phl_fsm_deinit_fsm(fsm);
}
/* For EXTERNAL application to create scan object */
/* @fsm: FSM main structure created by phl_scan_new_fsm()
* @phl_info: private data structure of caller
*
* return
* scan_obj: structure of scan object (Do NOT expose)
*/
struct scan_obj *phl_scan_new_obj(struct fsm_main *fsm,
struct phl_info_t *phl_info)
{
void *d = phl_to_drvpriv(phl_info);
struct fsm_obj *obj;
struct scan_obj *pscan;
pscan = phl_fsm_new_obj(fsm, (void **)&obj, sizeof(*pscan));
if (pscan == NULL) {
/* TODO free fsm; currently will be freed in deinit process */
FSM_ERR(fsm, "scan: malloc obj fail\n");
return NULL;
}
pscan->fsm = fsm;
pscan->fsm_obj = obj;
pscan->phl_info = phl_info;
/* init obj local use variable */
pq_init(d, &pscan->req_q);
pq_init(d, &pscan->off_ch_cmd_q);
pscan->param = NULL;
pscan->token_running = 0;
pscan->off_ch_step = OFF_CH_STEP_CLEAR;
pscan->back_op_is_required = false;
pscan->back_op_ch.scan_mode = BACKOP_MODE;
pscan->back_op_ch.duration = BACK_OP_CH_DUR_MS;
return pscan;
}
/* For EXTERNAL application to destory scan object */
/* @pscan: locally created scan object
*/
void phl_scan_destory_obj(struct scan_obj *pscan)
{
void *d;
_os_list *obj;
if (pscan == NULL)
return;
d = phl_to_drvpriv(pscan->phl_info);
/* deinit and free all local variables */
while (pq_pop(d, &pscan->req_q, &obj, _first, _bh))
scan_free_req(pscan, (struct fsm_msg *)obj);
pq_deinit(d, &pscan->req_q);
while (pq_pop(d, &pscan->off_ch_cmd_q, &obj, _first, _bh))
scan_free_msg(pscan, (struct fsm_msg *)obj);
pq_deinit(d, &pscan->off_ch_cmd_q);
/* clear scan param */
if (pscan->param) {
if (pscan->param && pscan->param->ch && pscan->param->ch_sz)
_os_kmem_free(d, pscan->param->ch, pscan->param->ch_sz);
_os_kmem_free(d, pscan->param, sizeof(*pscan->param));
pscan->param = NULL;
}
/* inform FSM framework to recycle fsm_obj */
phl_fsm_destory_obj(pscan->fsm_obj);
}
/* For EXTERNAL application to request scan (expose) */
/* @phl: phl private
 * @pbuf: scan parameter, will be freed by the caller after return
* @order: queuing order
*/
enum rtw_phl_status rtw_phl_scan_request(void *phl,
struct rtw_phl_scan_param *pbuf, enum PRECEDE order)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
void *d = phl_to_drvpriv(pscan->phl_info);
struct rtw_phl_scan_param *param = NULL;
struct phl_scan_channel *pch = NULL;
struct fsm_msg *msg;
pch = (struct phl_scan_channel *)_os_kmem_alloc(d, pbuf->ch_sz);
if (pch == NULL)
goto pch_fail;
param = (struct rtw_phl_scan_param *)_os_kmem_alloc(d, sizeof(*pbuf));
if (param == NULL)
goto param_fail;
/* NEW message to start scan */
msg = phl_fsm_new_msg(pscan->fsm_obj, SCAN_EV_START);
if (msg == NULL)
goto msg_fail;
/* fill token */
pbuf->token = scan_get_token(pscan);
_os_mem_cpy(d, pch, pbuf->ch, pbuf->ch_sz);
_os_mem_cpy(d, param, pbuf, sizeof(*param));
param->ch = pch;
param->ch_idx = -1;
param->scan_ch = ¶m->ch[0];
param->result = SCAN_REQ_ABORT;
param->enqueue_time = _os_get_cur_time_ms();
msg->param = param;
msg->param_sz = sizeof(*param);
/*
* Enqueue scan_request into extra queue (pending scan)
*/
switch (order) {
case IMMEDIATE:
pq_push(d, &pscan->req_q, (_os_list *)msg, _first, _bh);
/* Cancel running scan process */
phl_fsm_gen_msg(phl, pscan->fsm_obj, NULL, 0, FSM_EV_CANCEL);
break;
case TO_HEAD:
pq_push(d, &pscan->req_q, (_os_list *)msg, _first, _bh);
break;
case TO_TAIL:
default:
pq_push(d, &pscan->req_q, (_os_list *)msg, _tail, _bh);
break;
}
/* notify scan-obj to dequeue from extra queue */
return phl_fsm_gen_msg(phl, pscan->fsm_obj,
NULL, 0, SCAN_EV_NOTIFY_PENDING_SCAN_REQ);
msg_fail:
_os_kmem_free(d, param, sizeof(*param));
param_fail:
_os_kmem_free(d, pch, pbuf->ch_sz); /* free with the size it was allocated with */
pch_fail:
FSM_ERR(pscan->fsm, "%s: %s() malloc fail\n",
phl_fsm_obj_name(pscan->fsm_obj), __func__);
return RTW_PHL_STATUS_RESOURCE;
}
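/*
 * Usage sketch (illustrative, not part of the driver): the caller builds a
 * rtw_phl_scan_param on its own memory - it is copied above, so stack
 * storage is fine - fills the channel list and ops, then enqueues the
 * request. All field values below are assumptions for illustration only.
 */
#if 0
static void example_issue_scan(void *phl, struct rtw_phl_scan_ops *my_ops,
struct rtw_wifi_role_t *wrole, void *core_priv)
{
struct phl_scan_channel ch[2] = {0};
struct rtw_phl_scan_param req = {0};
ch[0].channel = 1;
ch[0].duration = 100; /* ms, assumed */
ch[0].type = RTW_PHL_SCAN_ACTIVE;
ch[1].channel = 36;
ch[1].duration = 110; /* ms, assumed */
ch[1].type = RTW_PHL_SCAN_PASSIVE;
req.wifi_role = wrole;
req.priv = core_priv;
req.ops = my_ops;
req.ch = ch;
req.ch_num = 2;
req.ch_sz = sizeof(ch);
req.max_scan_time = 5000; /* ms, assumed */
req.back_op_mode = SCAN_BKOP_CNT;
req.back_op_ch_cnt = 4;
rtw_phl_scan_request(phl, &req, TO_TAIL);
}
#endif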
/* For EXTERNAL application to del scan request (expose) */
/* @phl: phl_info_t
* @token: token to be deleted
* 0 means del all requests and cancel running scan
*/
enum rtw_phl_status rtw_phl_scan_del_request(void *phl, u32 token)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
void *d = phl_to_drvpriv(phl_info);
struct fsm_msg *msg, *msg_t;
struct rtw_phl_scan_param *param = NULL;
_os_list *obj;
if (token == ALL_TOKEN) {
while (pq_pop(d, &pscan->req_q, &obj, _first, _bh)) {
msg = (struct fsm_msg *)obj;
param = msg->param;
FSM_INFO(pscan->fsm, "%s abort token %d\n",
phl_fsm_obj_name(pscan->fsm_obj), param->token);
if (param && param->ops && param->ops->scan_complete)
param->ops->scan_complete(param->priv, param);
scan_free_req(pscan, (struct fsm_msg *)obj);
}
} else {
/* search token in scan request queue */
_os_spinlock(d, &pscan->req_q.lock, _bh, NULL);
phl_list_for_loop_safe(msg, msg_t,
struct fsm_msg, &pscan->req_q.queue, list) {
param = (struct rtw_phl_scan_param *)msg->param;
if (param && param->token == token) {
list_del(&msg->list);
pscan->req_q.cnt--;
_os_spinunlock(d, &pscan->req_q.lock, _bh, NULL);
FSM_INFO(pscan->fsm, "%s abort token %d\n",
phl_fsm_obj_name(pscan->fsm_obj),
token);
/* callback to caller */
if (param->ops && param->ops->scan_complete)
param->ops->scan_complete(param->priv,
param);
scan_free_req(pscan, msg);
return RTW_PHL_STATUS_SUCCESS;
}
}
_os_spinunlock(d, &pscan->req_q.lock, _bh, NULL);
FSM_INFO(pscan->fsm, "%s del token %d not found\n",
phl_fsm_obj_name(pscan->fsm_obj), token);
}
if ((pscan->param) &&
((pscan->param->token == token) || ALL_TOKEN == token)) {
/* we have a running scan, cancel it */
FSM_INFO(pscan->fsm, "%s cancel running scan token %d\n",
phl_fsm_obj_name(pscan->fsm_obj), pscan->param->token);
phl_fsm_cancel_obj(pscan->fsm_obj);
}
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL application to cancel scan (expose) */
/* @phl: phl private; the running scan will be cancelled
*/
enum rtw_phl_status rtw_phl_scan_cancel(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
return phl_fsm_cancel_obj(pscan->fsm_obj);
}
/* For EXTERNAL application to pause scan (expose)
* @phl: phl private
*/
enum rtw_phl_status rtw_phl_scan_pause(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
return phl_fsm_gen_msg(phl, pscan->fsm_obj, NULL, 0, SCAN_EV_PAUSE);
}
/* For EXTERNAL application to resume scan (expose)
* @phl: phl private
*/
enum rtw_phl_status rtw_phl_scan_resume(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
return phl_fsm_gen_msg(phl, pscan->fsm_obj, NULL, 0, SCAN_EV_RESUME);
}
/* For EXTERNAL application to change passive scan to active scan (expose)
* @phl: phl private
* @ch: channel to be changed to active scan
* @duration: scan time for the channel; 0: half of original duration
*
* Usually triggered when a beacon is received on a passive scan channel.
 * Changes the current scan channel from passive to active scan
 * (issues probe requests on the current scan channel), which makes it
 * easier to find a hidden-SSID AP on a passive scan channel.
*/
enum rtw_phl_status rtw_phl_scan_force_active_scan(void *phl,
u16 channel, u16 duration)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
struct phl_scan_channel ch;
_os_mem_set(phl_to_drvpriv(phl_info), &ch, 0, sizeof(ch));
ch.channel = channel;
ch.duration = duration;
return phl_fsm_gen_msg(phl, pscan->fsm_obj, &ch,
sizeof(ch), SCAN_EV_FORCE_ACTIVE);
}
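/*
 * Usage sketch (illustrative, not part of the driver): the core RX path may
 * call this when a beacon is received while dwelling on a passive scan
 * channel, e.g.
 *
 * rtw_phl_scan_force_active_scan(phl, bcn_channel, 0);
 *
 * The scan FSM then switches that channel to active scan and starts issuing
 * probe requests (see SCAN_EV_FORCE_ACTIVE in scan_off_ch_st_hdl()).
 */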
/* For EXTERNAL application to get the number of in-progress and queued scan requests (expose)
 * @phl: phl private
 *
 * return: 0: no scan is in progress
 * otherwise: number of queued scan requests + the in-progress scan
*/
int rtw_phl_scan_inprogress_req_num(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
u8 running = 0;
if (pscan->param != NULL)
running = 1;
return pscan->req_q.cnt + running;
}
bool rtw_phl_scan_scanning_req(void *phl, u32 token, u32 *token_running)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
*token_running = pscan->token_running;
return (token == *token_running);
}
enum rtw_phl_status rtw_phl_scan_off_ch_tx(void *phl, void *data, int len)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct scan_obj *pscan = phl_info->scan_obj;
struct rtw_phl_scan_param *param = pscan->param;
void *d = phl_to_drvpriv(pscan->phl_info);
struct fsm_msg *msg;
void *buf;
if (!param || !param->ops || !param->ops->scan_off_ch_tx)
return RTW_PHL_STATUS_FAILURE;
buf = _os_kmem_alloc(d, len);
if (buf == NULL)
goto buf_fail;
msg = phl_fsm_new_msg(pscan->fsm_obj, SCAN_EV_NOTIFY_OFF_CH_CMD);
if (msg == NULL)
goto msg_fail;
_os_mem_cpy(d, buf, data, len);
msg->param = buf;
msg->param_sz = len;
pq_push(d, &pscan->off_ch_cmd_q, (_os_list *)msg, _tail, _bh);
FSM_INFO(pscan->fsm, "%s %s(), q_cnt = %d\n",
phl_fsm_obj_name(pscan->fsm_obj), __func__,
pscan->off_ch_cmd_q.cnt);
/* notify scan-obj there is an off_ch_cmd */
return phl_fsm_gen_msg(phl, pscan->fsm_obj,
NULL, 0, SCAN_EV_NOTIFY_OFF_CH_CMD);
msg_fail:
_os_kmem_free(d, buf, len);
buf_fail:
FSM_ERR(pscan->fsm, "%s %s() malloc fail\n",
phl_fsm_obj_name(pscan->fsm_obj), __func__);
return RTW_PHL_STATUS_RESOURCE;
}
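/*
 * Usage sketch (illustrative, not part of the driver): a caller that needs a
 * frame transmitted while the FSM is off channel hands over an opaque buffer;
 * it is copied here and delivered back through ops->scan_off_ch_tx when
 * handle_off_ch_cmd() drains the queue.
 */
#if 0
static void example_off_ch_tx(void *phl)
{
u8 action_frame[32] = {0}; /* hypothetical frame payload */
rtw_phl_scan_off_ch_tx(phl, action_frame, sizeof(action_frame));
}
#endif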
#endif /*CONFIG_FSM*/
|
2301_81045437/rtl8852be
|
phl/phl_scan_fsm.c
|
C
|
agpl-3.0
| 39,035
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_SCAN_FSM_H__
#define __PHL_SCAN_FSM_H__
/* fsm private struct */
struct fsm_root;
struct fsm_main;
/* scan fsm init api */
struct fsm_main *phl_scan_new_fsm(struct fsm_root *fsm_m,
struct phl_info_t *phl_info);
void phl_scan_destory_fsm(struct fsm_main *fsm);
struct scan_obj *phl_scan_new_obj(struct fsm_main *fsm,
struct phl_info_t *phl_info);
void phl_scan_destory_obj(struct scan_obj *pscan);
bool phl_fsm_should_stop(struct fsm_main *fsm);
#endif /* __PHL_SCAN_FSM_H__ */
|
2301_81045437/rtl8852be
|
phl/phl_scan_fsm.h
|
C
|
agpl-3.0
| 1,152
|
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
#include "phl_scan_instance.h"
#include "phl_scan.h"
#include "phl_regulation.h"
enum {
ACTIVE_PERIOD_MIN = 40,
ACTIVE_PERIOD_DEFAULT = 60,
ACTIVE_PERIOD_MAX = 80
};
enum {
PASSIVE_PERIOD_MIN = 50,
PASSIVE_PERIOD_DEFAULT = 80,
PASSIVE_PERIOD_MAX = 110
};
struct rtw_pickup_channel {
enum band_type band;
u8 channel;
u8 property;
u8 picked;
};
struct rtw_pickup_chplan_group {
u32 cnt;
struct rtw_pickup_channel ch[MAX_CH_NUM_GROUP];
};
static void _set_inst_ch(enum period_strategy strategy,
struct instance_channel *dest,
enum band_type band,
u8 channel, u8 property)
{
u8 max_t[2] = {PASSIVE_PERIOD_MAX, ACTIVE_PERIOD_MAX};
u8 min_t[2] = {PASSIVE_PERIOD_MIN, ACTIVE_PERIOD_MIN};
u8 def_t[2] = {PASSIVE_PERIOD_DEFAULT, ACTIVE_PERIOD_DEFAULT};
dest->band = band;
dest->channel = channel;
dest->property = property;
dest->mode = NORMAL_SCAN_MODE;
dest->bw = CHANNEL_WIDTH_20;
dest->offset = CHAN_OFFSET_NO_EXT;
/* active or passive */
if ((dest->property & CH_PASSIVE) ||
(dest->property & CH_DFS))
dest->active = 0;
else
dest->active = 1;
if (strategy & PERIOD_ALL_MAX)
dest->period = max_t[dest->active];
else if (strategy & PERIOD_ALL_MIN)
dest->period = min_t[dest->active];
else {
if ((strategy & PERIOD_MIN_DFS) &&
(dest->property & CH_DFS))
dest->period = min_t[dest->active];
else
dest->period = def_t[dest->active];
}
PHL_INFO("[REGU], pick channel %d, active=%d, period=%d\n",
dest->channel, dest->active, dest->period);
}
static void _pick_active_channels(struct rtw_pickup_chplan_group *group,
struct instance_strategy *strategy,
struct instance *inst)
{
struct instance_channel *dest = NULL;
struct rtw_pickup_channel *src = NULL;
u32 i = 0;
dest = &inst->ch[inst->cnt];
for (i = 0; i < group->cnt; i++) {
src = &group->ch[i];
if (src->picked)
continue;
if (!(src->property & CH_PASSIVE) &&
!(src->property & CH_DFS) ) {
_set_inst_ch(strategy->period, dest,
src->band,
src->channel,
src->property);
inst->cnt++;
dest++;
src->picked = 1;
}
}
}
static void _pick_the_rest_channels(struct rtw_pickup_chplan_group *group,
struct instance_strategy *strategy,
struct instance *inst)
{
struct instance_channel *dest = NULL;
struct rtw_pickup_channel *src = NULL;
u32 i = 0;
dest = &inst->ch[inst->cnt];
for (i = 0; i < group->cnt; i++) {
src = &group->ch[i];
if (!src->picked) {
_set_inst_ch(strategy->period, dest,
src->band,
src->channel,
src->property);
inst->cnt++;
dest++;
src->picked = 1;
}
}
}
static void _pick_5ghz_channels(struct rtw_pickup_chplan_group *group,
struct instance_strategy *strategy,
struct instance *inst)
{
u8 order = strategy->order;
u8 i = 0;
if (order & ORDER_ACTIVE_PRIOR) {
for (i = FREQ_GROUP_5GHZ_BAND1;
i <= FREQ_GROUP_5GHZ_BAND4; i++) {
_pick_active_channels(&group[i], strategy, inst);
}
}
for (i = FREQ_GROUP_5GHZ_BAND1;
i <= FREQ_GROUP_5GHZ_BAND4; i++) {
_pick_the_rest_channels(&group[i], strategy, inst);
}
}
static void _pick_2ghz_channels(struct rtw_pickup_chplan_group *group,
struct instance_strategy *strategy,
struct instance *inst)
{
u8 order = strategy->order;
if (order & ORDER_ACTIVE_PRIOR)
_pick_active_channels(&group[FREQ_GROUP_2GHZ],
strategy, inst);
_pick_the_rest_channels(&group[FREQ_GROUP_2GHZ],
strategy, inst);
}
static void _pick_6ghz_channels(struct rtw_pickup_chplan_group *group,
struct instance_strategy *strategy,
struct instance *inst)
{
_pick_the_rest_channels(&group[FREQ_GROUP_6GHZ_PSC],
strategy, inst);
}
static void _generate_instance(
struct rtw_pickup_chplan_group *group,
struct instance_strategy *strategy,
struct instance *inst)
{
u8 order = strategy->order;
inst->cnt = 0;
if (order & ORDER_5GHZ_PRIOR) {
_pick_5ghz_channels(group, strategy, inst);
_pick_2ghz_channels(group, strategy, inst);
} else {
_pick_2ghz_channels(group, strategy, inst);
_pick_5ghz_channels(group, strategy, inst);
}
_pick_6ghz_channels(group, strategy, inst);
}
static void _select_channels_by_group(struct instance_strategy *strategy,
struct rtw_regulation_chplan *plan,
struct rtw_pickup_chplan_group *group)
{
u8 skip = strategy->skip;
u8 chnl = 0, property = 0, gpidx = 0, keep = 0;
enum band_type band = BAND_ON_24G;
u32 i = 0;
for (i = 0; i < plan->cnt; i++) {
band = plan->ch[i].band;
chnl = plan->ch[i].channel;
property = plan->ch[i].property;
keep = 0;
/* skip passive channels */
if ((skip & SKIP_PASSIVE) && (property & CH_PASSIVE))
continue;
/* skip DFS channels */
if ((skip & SKIP_DFS) && (property & CH_DFS))
continue;
if ((BAND_2GHZ(band)) && !(skip & SKIP_2GHZ)) {
gpidx = FREQ_GROUP_2GHZ;
keep = 1;
} else if ((BAND_5GHZ(band)) && !(skip & SKIP_5GHZ)) {
if (CH_5GHZ_BAND1(chnl))
gpidx = FREQ_GROUP_5GHZ_BAND1;
else if (CH_5GHZ_BAND2(chnl))
gpidx = FREQ_GROUP_5GHZ_BAND2;
else if (CH_5GHZ_BAND3(chnl))
gpidx = FREQ_GROUP_5GHZ_BAND3;
else if (CH_5GHZ_BAND4(chnl))
gpidx = FREQ_GROUP_5GHZ_BAND4;
else
continue;
keep = 1;
} else if ((BAND_6GHZ(band)) && !(skip & SKIP_6GHZ)) {
gpidx = FREQ_GROUP_6GHZ_PSC;
keep = 1;
}
if (keep) {
group[gpidx].ch[group[gpidx].cnt].band = band;
group[gpidx].ch[group[gpidx].cnt].channel = chnl;
group[gpidx].ch[group[gpidx].cnt].property = property;
group[gpidx].ch[group[gpidx].cnt].picked = 0;
group[gpidx].cnt++;
PHL_INFO("[REGU], keep group-%d channel %d, cnt=%d\n",
gpidx, chnl, group[gpidx].cnt);
}
}
}
bool rtw_phl_generate_scan_instance(struct instance_strategy *strategy,
struct rtw_regulation_chplan *chplan,
struct instance *inst)
{
struct rtw_pickup_chplan_group group[FREQ_GROUP_MAX] = {0};
u32 i = 0;
if (!strategy || !inst || !chplan)
return false;
PHL_INFO("[REGU], Generate Scan Instance, strategy [skip=0x%x, order=0x%x, period=0x%x] \n",
strategy->skip, strategy->order, strategy->period);
PHL_INFO("[REGU], Channel Plan Source : \n");
for (i = 0; i < chplan->cnt; i++) {
PHL_INFO("[REGU], %d. band=%d, ch=%d, dfs=%d, passive=%d (property=0x%x)\n",
i + 1, chplan->ch[i].band, chplan->ch[i].channel,
((chplan->ch[i].property & CH_DFS) ? 1 : 0),
((chplan->ch[i].property & CH_PASSIVE) ? 1 : 0),
chplan->ch[i].property);
}
/* step1 : remove "skip channels" and select channels into groups */
_select_channels_by_group(strategy, chplan, group);
/* step2 : generate instance by strategy */
_generate_instance(group, strategy, inst);
PHL_INFO("[REGU], Output Scan Instance : \n");
for (i = 0; i < inst->cnt; i++) {
PHL_INFO("[REGU], %d. band=%d, ch=%d, active=%d, period=%d \n",
i + 1, inst->ch[i].band,
inst->ch[i].channel,
inst->ch[i].active,
inst->ch[i].period);
}
return true;
}
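/*
 * Usage sketch (illustrative, not part of the driver): build a strategy and
 * generate a scan instance from a regulation channel plan. How `plan` gets
 * filled is left out here - assume the regulation module has populated it.
 */
#if 0
static void example_build_instance(void)
{
struct instance_strategy strategy = {0};
static struct rtw_regulation_chplan plan; /* assumed filled elsewhere */
static struct instance inst;
strategy.order = ORDER_5GHZ_PRIOR | ORDER_ACTIVE_PRIOR;
strategy.skip = SKIP_DFS;
strategy.period = PERIOD_MIN_DFS;
if (rtw_phl_generate_scan_instance(&strategy, &plan, &inst))
PHL_INFO("[REGU], example instance has %d channels\n", inst.cnt);
}
#endif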
bool rtw_phl_scan_instance_insert_ch(void *phl, struct instance *inst,
enum band_type band, u8 channel,
u8 strategy_period)
{
struct rtw_regulation_channel ch = {0};
struct instance_channel *dest = NULL;
if (!phl || !inst)
return false;
if (inst->cnt >= MAX_SCAN_INSTANCE)
return false;
if (rtw_phl_regulation_query_ch(phl, band, channel, &ch)) {
dest = &inst->ch[inst->cnt];
_set_inst_ch(strategy_period, dest,
ch.band, ch.channel, ch.property);
inst->cnt++;
return true;
}
return false;
}
|
2301_81045437/rtl8852be
|
phl/phl_scan_instance.c
|
C
|
agpl-3.0
| 8,100
|
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_SCAN_INSTANCE_H_
#define _PHL_SCAN_INSTANCE_H_
#define MAX_SCAN_INSTANCE 160
enum order_strategy {
ORDER_5GHZ_PRIOR = BIT(0),
ORDER_ACTIVE_PRIOR = BIT(1)
};
enum skip_strategy {
SKIP_2GHZ = BIT(0),
SKIP_5GHZ = BIT(1),
SKIP_PASSIVE = BIT(2),
SKIP_DFS = BIT(3),
SKIP_6GHZ = BIT(4),
};
enum period_strategy {
PERIOD_ALL_MAX = BIT(0),
PERIOD_ALL_MIN = BIT(1),
PERIOD_MIN_DFS = BIT(2)
};
struct instance_strategy {
u8 order;
u8 skip;
u8 period;
};
struct instance_channel {
enum band_type band;
u8 channel;
u8 property;
u8 active;
u8 period;
u8 mode;
u8 bw;
u8 offset;
};
struct instance {
u32 cnt; /* channel cnt */
struct instance_channel ch[MAX_SCAN_INSTANCE];
};
#endif /* _PHL_SCAN_INSTANCE_H_ */
|
2301_81045437/rtl8852be
|
phl/phl_scan_instance.h
|
C
|
agpl-3.0
| 1,391
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_SEC_C_
#include "phl_headers.h"
#define RTW_PHL_EXT_KEY_LEN 32
#define RTW_SEC_KEY_TYPE_NUM 3
static enum rtw_phl_status
_phl_set_key(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta,
struct phl_sec_param_h *crypt,
u8 *keybuf)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
if(keybuf)
PHL_INFO("Add_key:: enc_type(%d) key_id(%d) key_type(%d)\n",
crypt->enc_type, crypt->keyid, crypt->key_type);
hal_status = rtw_hal_set_key(phl_info->hal,
sta,
crypt->enc_type,
(crypt->key_len==RTW_PHL_EXT_KEY_LEN)?1:0,
crypt->spp,
crypt->keyid,
crypt->key_type,
keybuf);
if (hal_status == RTW_HAL_STATUS_SUCCESS)
phl_status = RTW_PHL_STATUS_SUCCESS;
return phl_status;
}
#ifdef CONFIG_CMD_DISP
struct cmd_sec_param {
struct rtw_phl_stainfo_t *sta;
struct phl_sec_param_h *crypt;
u8 *keybuf;
};
static void
_phl_cmd_set_key_done(void *drv_priv,
u8 *cmd,
u32 cmd_len,
enum rtw_phl_status status)
{
struct cmd_sec_param *param = (struct cmd_sec_param *)cmd;
if (param) {
if (param->keybuf)
_os_kmem_free(drv_priv,
param->keybuf,
param->crypt->key_len);
if (param->crypt)
_os_kmem_free(drv_priv,
param->crypt,
sizeof(struct phl_sec_param_h));
_os_kmem_free(drv_priv, param, cmd_len);
}
}
enum rtw_phl_status
_phl_cmd_set_key(void *phl,
struct rtw_phl_stainfo_t *sta,
struct phl_sec_param_h *crypt,
u8 *keybuf,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv = phl_to_drvpriv(phl_info);
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct cmd_sec_param *param = NULL;
u32 param_len = 0, crypt_len = 0;
if (cmd_type == PHL_CMD_DIRECTLY) {
psts = _phl_set_key(phl_info, sta, crypt, keybuf);
goto _exit;
}
param_len = sizeof(struct cmd_sec_param);
param = _os_kmem_alloc(drv, param_len);
if (param == NULL) {
PHL_ERR("%s: alloc param failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto error_param;
}
_os_mem_set(drv, param, 0, param_len);
param->sta = sta;
crypt_len = sizeof(struct phl_sec_param_h);
param->crypt = _os_kmem_alloc(drv, crypt_len);
if (param->crypt == NULL) {
PHL_ERR("%s: alloc param->crypt failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto error_crypt;
}
_os_mem_set(drv, param->crypt, 0, crypt_len);
_os_mem_cpy(drv, param->crypt, crypt, crypt_len);
if (keybuf) { /* set key */
param->keybuf = _os_kmem_alloc(drv, param->crypt->key_len);
if (param->keybuf == NULL) {
PHL_ERR("%s: alloc param->keybuf failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto error_key_buf;
}
_os_mem_cpy(drv, param->keybuf, keybuf, param->crypt->key_len);
}
psts = phl_cmd_enqueue(phl_info,
sta->wrole->hw_band,
MSG_EVT_SEC_KEY,
(u8 *)param,
param_len,
_phl_cmd_set_key_done,
cmd_type,
cmd_timeout);
if (is_cmd_failure(psts)) {
/* Sending the cmd succeeded, but waiting for completion failed */
psts = RTW_PHL_STATUS_FAILURE;
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Sending the cmd failed */
psts = RTW_PHL_STATUS_FAILURE;
goto error_cmd;
}
return psts;
error_cmd:
if (param->keybuf)
_os_kmem_free(drv, param->keybuf, param->crypt->key_len);
error_key_buf:
if (param->crypt)
_os_kmem_free(drv, param->crypt, crypt_len);
error_crypt:
if (param)
_os_kmem_free(drv, param, param_len);
error_param:
_exit:
return psts;
}
enum rtw_phl_status
phl_cmd_set_key_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct cmd_sec_param *cmd_sec_param = (struct cmd_sec_param *)param;
return _phl_set_key(phl_info,
cmd_sec_param->sta,
cmd_sec_param->crypt,
cmd_sec_param->keybuf);
}
#endif /* CONFIG_CMD_DISP */
enum rtw_phl_status
rtw_phl_cmd_add_key(void *phl,
struct rtw_phl_stainfo_t *sta,
struct phl_sec_param_h *crypt,
u8 *keybuf,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
#ifdef CONFIG_CMD_DISP
return _phl_cmd_set_key(phl, sta, crypt, keybuf, cmd_type, cmd_timeout);
#else
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s: not support add key cmd\n",
__func__);
return _phl_set_key((struct phl_info_t *)phl, sta, crypt, keybuf);
#endif /* CONFIG_CMD_DISP */
}
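/*
 * Usage sketch (illustrative, not part of the driver): add a pairwise key for
 * a station. Only the fields referenced by _phl_set_key() are filled in; the
 * exact enum values for enc_type/key_type are assumptions of the caller.
 */
#if 0
static void example_add_key(void *phl, struct rtw_phl_stainfo_t *sta,
u8 *key, u8 key_len, u8 enc_type, u8 key_type)
{
struct phl_sec_param_h crypt = {0};
crypt.enc_type = enc_type; /* e.g. CCMP; exact enum assumed */
crypt.key_type = key_type; /* pairwise/group; exact enum assumed */
crypt.keyid = 0;
crypt.key_len = key_len;
crypt.spp = 0;
rtw_phl_cmd_add_key(phl, sta, &crypt, key, PHL_CMD_DIRECTLY, 0);
}
#endif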
enum rtw_phl_status
rtw_phl_cmd_del_key(void *phl,
struct rtw_phl_stainfo_t *sta,
struct phl_sec_param_h *crypt,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
#ifdef CONFIG_CMD_DISP
return _phl_cmd_set_key(phl, sta, crypt, NULL, cmd_type, cmd_timeout);
#else
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s: not support del key cmd\n",
__func__);
return _phl_set_key((struct phl_info_t *)phl, sta, crypt, NULL);
#endif /* CONFIG_CMD_DISP */
}
u8 rtw_phl_trans_sec_mode(u8 unicast, u8 multicast)
{
u8 ret = RTW_SEC_ENT_MODE_0;
if (RTW_ENC_NONE == unicast && RTW_ENC_NONE == multicast) {
ret = RTW_SEC_ENT_MODE_0;
} else if ((RTW_ENC_WEP40 == unicast && RTW_ENC_WEP40 == multicast) ||
(RTW_ENC_WEP104 == unicast && RTW_ENC_WEP104 == multicast)) {
ret = RTW_SEC_ENT_MODE_1;
} else if (RTW_ENC_WEP40 == multicast || RTW_ENC_WEP104 == multicast) {
ret = RTW_SEC_ENT_MODE_3;
} else {
ret = RTW_SEC_ENT_MODE_2;
}
return ret;
}
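/*
 * Illustrative mapping derived from the function above (not additional driver
 * code); RTW_ENC_CCMP is assumed to be the pairwise CCMP enum value:
 *
 * unicast          multicast        -> sec entry mode
 * RTW_ENC_NONE     RTW_ENC_NONE     -> RTW_SEC_ENT_MODE_0
 * RTW_ENC_WEP40    RTW_ENC_WEP40    -> RTW_SEC_ENT_MODE_1
 * RTW_ENC_CCMP     RTW_ENC_WEP40    -> RTW_SEC_ENT_MODE_3
 * RTW_ENC_CCMP     RTW_ENC_CCMP     -> RTW_SEC_ENT_MODE_2
 */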
u8 rtw_phl_get_sec_cam_idx(void *phl,
struct rtw_phl_stainfo_t *sta,
u8 keyid,
u8 key_type)
{
u8 ret = 0;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
ret = (u8) rtw_hal_search_key_idx(phl_info->hal, sta, keyid, key_type);
return ret;
}
|
2301_81045437/rtl8852be
|
phl/phl_sec.c
|
C
|
agpl-3.0
| 6,944
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_SER_DEF_H__
#define __PHL_SER_DEF_H__
enum RTW_PHL_SER_NOTIFY_EVENT {
RTW_PHL_SER_L0_RESET, /* L0 notify only */
RTW_PHL_SER_PAUSE_TRX, /* M1 */
RTW_PHL_SER_DO_RECOVERY, /* M3 */
RTW_PHL_SER_READY, /* M5 */
RTW_PHL_SER_L2_RESET,/* M9 */
RTW_PHL_SER_EVENT_CHK,
RTW_PHL_SER_DUMP_FW_LOG,
RTW_PHL_SER_LOG_ONLY,
RTW_PHL_SER_MAX = 8
};
enum RTW_PHL_SER_RCVY_STEP {
RTW_PHL_SER_L1_DISABLE_EN = 0x0001,
RTW_PHL_SER_L1_RCVY_EN = 0x0002,
RTW_PHL_SER_L0_CFG_NOTIFY = 0x0010,
RTW_PHL_SER_L0_CFG_DIS_NOTIFY = 0x0011,
RTW_PHL_SER_L0_CFG_HANDSHAKE = 0x0012,
RTW_PHL_SER_L0_RCVY_EN = 0x0013,
};
#endif /* __PHL_SER_DEF_H__ */
|
2301_81045437/rtl8852be
|
phl/phl_ser_def.h
|
C
|
agpl-3.0
| 1,299
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
#ifdef CONFIG_FSM
/*
* SER stands for System Error Recovery
*/
#define SER_FW_TIMEOUT 1000 /* ms */
#define SER_POLLING_INTERVAL 10 /* ms */
#define SER_USB_POLLING_INTERVAL_IDL 1000 /* ms */
#define SER_USB_POLLING_INTERVAL_ACT 10 /* ms */
#define SER_POLL_IO_TIMES 200
#define SER_USB_POLL_IO_TIMES 300
#define SER_POLL_BULK_TIMES 100
#define SER_L0 0x00000001
#define SER_L1 0x00000002
#define SER_L2 0x00000004
struct ser_obj {
struct fsm_main *fsm;
struct phl_info_t *phl_info;
struct fsm_obj *fsm_obj;
int poll_io_times;
int poll_bulk_times;
bool trigger_l2_reset;
bool trigger_l1_reset;
bool dynamicThredRunning;
_os_lock state_lock;
};
enum SER_STATE_ST {
SER_ST_IDLE,
SER_ST_L1_PAUSE_TRX,
SER_ST_L1_DO_HCI,
SER_ST_L1_RESUME_TRX,
SER_ST_L2
};
enum SER_EV_ID {
SER_EV_L1_START,
//SER_EV_L2_START,
//SER_EV_M1_PAUSE_TRX,
SER_EV_M3_DO_RECOVERY,
SER_EV_M5_READY,
SER_EV_M9_L2_RESET,
SER_EV_FW_TIMER_EXPIRE,
SER_EV_POLL_IO_EXPIRE,
SER_EV_POLL_BULK_EXPIRE,
SER_EV_POLL_USB_INT_EXPIRE,
SER_EV_CHK_SER_EVENT,
SER_EV_MAX
};
static int ser_idle_st_hdl(void *obj, u16 event, void *param);
static int ser_usb_idle_st_hdl(void *obj, u16 event, void *param);
static int ser_pci_l1_pause_trx_st_hdl(void *obj, u16 event, void *param);
static int ser_usb_l1_pause_trx_st_hdl(void *obj, u16 event, void *param);
static int ser_sdio_l1_pause_trx_st_hdl(void *obj, u16 event, void *param);
static int ser_pci_l1_do_hci_st_hdl(void *obj, u16 event, void *param);
static int ser_usb_l1_do_hci_st_hdl(void *obj, u16 event, void *param);
static int ser_sdio_l1_do_hci_st_hdl(void *obj, u16 event, void *param);
static int ser_l1_resume_trx_st_hdl(void *obj, u16 event, void *param);
static int ser_l2_st_hdl(void *obj, u16 event, void *param);
/* PCIE STATE table */
static struct fsm_state_ent ser_pci_state_tbl[] = {
ST_ENT(SER_ST_IDLE, ser_idle_st_hdl),
ST_ENT(SER_ST_L1_PAUSE_TRX, ser_pci_l1_pause_trx_st_hdl),
ST_ENT(SER_ST_L1_DO_HCI, ser_pci_l1_do_hci_st_hdl),
ST_ENT(SER_ST_L1_RESUME_TRX, ser_l1_resume_trx_st_hdl),
ST_ENT(SER_ST_L2, ser_l2_st_hdl)
};
/* USB STATE table */
static struct fsm_state_ent ser_usb_state_tbl[] = {
ST_ENT(SER_ST_IDLE, ser_usb_idle_st_hdl),
ST_ENT(SER_ST_L1_PAUSE_TRX, ser_usb_l1_pause_trx_st_hdl),
ST_ENT(SER_ST_L1_DO_HCI, ser_usb_l1_do_hci_st_hdl),
ST_ENT(SER_ST_L1_RESUME_TRX, ser_l1_resume_trx_st_hdl),
ST_ENT(SER_ST_L2, ser_l2_st_hdl)
};
/* SDIO STATE table */
static struct fsm_state_ent ser_sdio_state_tbl[] = {
ST_ENT(SER_ST_IDLE, ser_idle_st_hdl),
ST_ENT(SER_ST_L1_PAUSE_TRX, ser_sdio_l1_pause_trx_st_hdl),
ST_ENT(SER_ST_L1_DO_HCI, ser_sdio_l1_do_hci_st_hdl),
ST_ENT(SER_ST_L1_RESUME_TRX, ser_l1_resume_trx_st_hdl),
ST_ENT(SER_ST_L2, ser_l2_st_hdl)
};
/* EVENT table */
static struct fsm_event_ent ser_event_tbl[] = {
EV_ENT(SER_EV_L1_START),
//EV_ENT(SER_EV_L2_START),
//EV_ENT(SER_EV_M1_PAUSE_TRX),
EV_ENT(SER_EV_M3_DO_RECOVERY),
EV_ENT(SER_EV_M5_READY),
EV_ENT(SER_EV_M9_L2_RESET),
EV_ENT(SER_EV_FW_TIMER_EXPIRE),
EV_ENT(SER_EV_POLL_IO_EXPIRE),
EV_ENT(SER_EV_POLL_BULK_EXPIRE),
EV_DBG(SER_EV_POLL_USB_INT_EXPIRE),
EV_ENT(SER_EV_CHK_SER_EVENT),
EV_ENT(SER_EV_MAX) /* EV_MAX for fsm safety checking */
};
static enum rtw_phl_status _ser_event_notify(void *phl, u8* p_ntfy)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct ser_obj *pser = phl_info->ser_obj;
struct phl_msg msg = {0};
enum RTW_PHL_SER_NOTIFY_EVENT notify = RTW_PHL_SER_L2_RESET;
u32 err = 0;
notify = rtw_hal_ser_get_error_status(pser->phl_info->hal, &err);
if (p_ntfy != NULL)
*p_ntfy = notify;
phl_info->phl_com->phl_stats.ser_event[notify]++;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "_ser_event_notify, error 0x%x, notify 0x%x\n", err, notify);
if (notify == RTW_PHL_SER_L0_RESET) {
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "_ser_event_notify, hit L0 Reset\n");
return RTW_PHL_STATUS_SUCCESS;
}
if (notify == RTW_PHL_SER_LOG_ONLY) {
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "_ser_event_notify, RTW_PHL_SER_LOG_ONLY\n");
return RTW_PHL_STATUS_SUCCESS;
}
if (notify == RTW_PHL_SER_DUMP_FW_LOG) {
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "_ser_event_notify, RTW_PHL_SER_DUMP_FW_LOG\n");
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PHY_MGNT);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_DUMP_PLE_BUFFER);
phl_msg_hub_send(phl_info, NULL, &msg);
return RTW_PHL_STATUS_SUCCESS;
}
return phl_ser_send_msg(phl, notify);
}
/*
* SER sub function
*/
void ser_pcie_pause_dma_io(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
}
int ser_polling_io_state(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
return 0; /* success */
}
void ser_return_all_tcb_and_mem(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
}
void ser_send_l0_handle_method_event(struct ser_obj *pser, enum RTW_PHL_SER_RCVY_STEP event)
{
FSM_DBG(pser->fsm, "%s: %s with event = 0x%04X\n",
phl_fsm_obj_name(pser->fsm_obj), __func__, event);
/*
 * Default is notify only (RTW_PHL_SER_L0_CFG_NOTIFY).
 * If L0 debug is needed, set RTW_PHL_SER_L0_CFG_HANDSHAKE at init:
 * 1. after receiving the L0 notify via H2C, dump some CRs for debug.
 * 2. set RTW_PHL_SER_L0_RCVY_EN
 */
phl_ser_event_to_fw(pser->phl_info, event);
}
void ser_send_l0_do_rcvy_event(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
/* Default is notify only (RTW_PHL_SER_L0_CFG_NOTIFY) */
phl_ser_event_to_fw(pser->phl_info, RTW_PHL_SER_L0_RCVY_EN);
}
void ser_send_m2_event(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
phl_ser_event_to_fw(pser->phl_info, RTW_PHL_SER_L1_DISABLE_EN);
}
void ser_send_m4_event(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
phl_ser_event_to_fw(pser->phl_info, RTW_PHL_SER_L1_RCVY_EN);
}
void ser_usb_trx_disable_cr(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
}
int ser_usb_wait_all_bulk_complete(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
return 0; /* success */
}
void ser_pci_clear_rw_pointer(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
}
void ser_pci_assert_dessert_dma_reset(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
}
void ser_pci_reset_bdram(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
}
void ser_pci_enable_dma_io(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
}
void ser_usb_enable_bulk_IO(struct ser_obj *pser)
{
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
}
void ser_resume_trx_process(struct ser_obj *pser, u8 type)
{
struct phl_hci_trx_ops *ops = pser->phl_info->hci_trx_ops;
FSM_DBG(pser->fsm, "%s: %s\n",
phl_fsm_obj_name(pser->fsm_obj), __func__);
ops->trx_resume(pser->phl_info, type);
}
#ifdef RTW_WKARD_SER_USB_POLLING_EVENT
extern bool rtw_phl_recognize_interrupt(void *phl);
extern enum rtw_phl_status rtw_phl_interrupt_handler(void *phl);
static void ser_usb_chk_int_event(struct ser_obj *pser)
{
if (true == rtw_phl_recognize_interrupt(pser->phl_info))
rtw_phl_interrupt_handler(pser->phl_info);
}
#endif
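/*
 * Note (derived from the USB handlers below): with
 * RTW_WKARD_SER_USB_POLLING_EVENT, SER-related interrupts on USB are
 * detected by periodically re-arming the SER_EV_POLL_USB_INT_EXPIRE alarm;
 * each expiry calls ser_usb_chk_int_event(), which runs one
 * recognize/handle pass of the generic interrupt path.
 */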
/*
* SER state handler
*/
u8 rtw_phl_ser_inprogress(void *phl)
{
return phl_ser_inprogress(phl);
}
void rtw_phl_ser_clear_status(void *phl, u32 serstatus)
{
phl_ser_clear_status(phl, serstatus);
}
/*
* ser idle handler
*/
static int ser_idle_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
u8 notify;
switch (event) {
case FSM_EV_STATE_IN:
break;
case SER_EV_CHK_SER_EVENT:
if (true == rtw_hal_recognize_halt_c2h_interrupt(pser->phl_info->hal)) {
			_ser_event_notify(pser->phl_info, &notify);
if ((notify == RTW_PHL_SER_L0_RESET) || (notify == RTW_PHL_SER_L2_RESET))
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
}
else {
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
}
break;
case SER_EV_L1_START:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L1_PAUSE_TRX);
break;
#if 0
case SER_EV_L2_START:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
#endif
case SER_EV_M9_L2_RESET:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
case FSM_EV_STATE_OUT:
break;
default:
break;
}
return 0;
}
static int ser_usb_idle_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
switch (event) {
case FSM_EV_SWITCH_IN:
#if 1
/* Disable L0 Reset Notify from FW to driver */
ser_send_l0_handle_method_event(pser, RTW_PHL_SER_L0_CFG_DIS_NOTIFY);
#endif
/* fallthrough */
case FSM_EV_STATE_IN:
#if defined(RTW_WKARD_SER_USB_POLLING_EVENT) && !defined(CONFIG_PHL_CMD_SER)
phl_fsm_set_alarm_ext(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_IDL, SER_EV_POLL_USB_INT_EXPIRE, 1, NULL);
#endif
break;
#ifdef RTW_WKARD_SER_USB_POLLING_EVENT
case SER_EV_POLL_USB_INT_EXPIRE:
ser_usb_chk_int_event(pser);
phl_fsm_set_alarm_ext(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_IDL, SER_EV_POLL_USB_INT_EXPIRE, 1, NULL);
break;
#endif
case SER_EV_L1_START:
#ifndef RTW_WKARD_SER_USB_DISABLE_L1_RCVY_FLOW
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L1_PAUSE_TRX);
#endif
break;
#if 0
case SER_EV_L2_START:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
#endif
case SER_EV_M9_L2_RESET:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm_ext(pser->fsm_obj, 1);
break;
default:
break;
}
return 0;
}
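/*
 * L1 (level-1) recovery flow, as implemented by the *_l1_* state handlers
 * below (event names follow the M1..M9 SER handshake with FW):
 * M1 (SER_EV_L1_START): pause TRX, run recovery step 1, send M2
 *     (RTW_PHL_SER_L1_DISABLE_EN) and wait for FW.
 * M3 (SER_EV_M3_DO_RECOVERY): run recovery step 2 in SER_ST_L1_DO_HCI,
 *     resume RX and send M4 (RTW_PHL_SER_L1_RCVY_EN).
 * M5 (SER_EV_M5_READY): resume TX and return to SER_ST_IDLE.
 * M9 (SER_EV_M9_L2_RESET): abort L1 and fall back to SER_ST_L2.
 * FW timeouts and failed pause polling are handled per HCI: they resume
 * TRX and either return to SER_ST_IDLE or fall back to SER_ST_L2 (with
 * RTW_WKARD_SER_L1_EXPIRE the M2 handshake may be retried instead).
 */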
static int ser_pci_l1_pause_trx_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
struct phl_hci_trx_ops *ops = pser->phl_info->hci_trx_ops;
struct phl_msg msg = {0};
enum rtw_hal_status status = RTW_HAL_STATUS_FAILURE;
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_SER);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_SER_L1);
switch (event) {
case FSM_EV_STATE_IN:
phl_ser_set_status(pser, SER_L1);
//send msg to core
phl_msg_hub_send(pser->phl_info, NULL, &msg);
ops->req_tx_stop(pser->phl_info);
rtw_phl_tx_req_notify(pser->phl_info);
if(rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_RCVY_STEP_1)
!= RTW_HAL_STATUS_SUCCESS){
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
}
ops->req_rx_stop(pser->phl_info);
rtw_phl_start_rx_process(pser->phl_info);
if (false == ops->is_tx_pause(pser->phl_info) ||
false == ops->is_rx_pause(pser->phl_info)) {
/* pci: polling fail; wait for a while */
phl_fsm_set_alarm(pser->fsm_obj,
SER_POLLING_INTERVAL, SER_EV_POLL_IO_EXPIRE);
/* prevent infinite polling */
pser->poll_io_times = SER_POLL_IO_TIMES;
break;
}
ops->trx_reset(pser->phl_info, PHL_CTRL_TX|PHL_CTRL_RX);
/* pci: send M2 event */
ser_send_m2_event(pser);
/* pci: wait M3 */
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
case SER_EV_M3_DO_RECOVERY:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L1_DO_HCI);
break;
case SER_EV_FW_TIMER_EXPIRE:
/*phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);*/
status = rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2);
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
if (status != RTW_HAL_STATUS_SUCCESS) {
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
} else {
phl_ser_clear_status(pser, SER_L1);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
}
break;
case SER_EV_M9_L2_RESET:
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
case SER_EV_POLL_IO_EXPIRE:
if (pser->poll_io_times-- <= 0) {
#ifdef RTW_WKARD_SER_L1_EXPIRE
ops->trx_reset(pser->phl_info, PHL_CTRL_TX|PHL_CTRL_RX);
/* pci: send M2 event */
ser_send_m2_event(pser);
/* pci: wait M3 */
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
#else
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
#endif
break;
}
/*send msg to core*/
/*phl_msg_hub_send(pser->phl_info, &msg);*/
if (false == ops->is_tx_pause(pser->phl_info) ||
false == ops->is_rx_pause(pser->phl_info) ) {
/* pci: polling fail; wait for a while */
phl_fsm_set_alarm(pser->fsm_obj,
SER_POLLING_INTERVAL, SER_EV_POLL_IO_EXPIRE);
break;
}
/*
if(rtw_hal_lv1_rcvy(hal_info, RTW_PHL_SER_LV1_RCVY_STEP_1) != RTW_HAL_STATUS_SUCCESS){
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
}
*/
ops->trx_reset(pser->phl_info, PHL_CTRL_TX|PHL_CTRL_RX);
/* pci: send M2 event */
ser_send_m2_event(pser);
/* pci: wait M3 */
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
case FSM_EV_CANCEL:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pser->fsm_obj);
break;
default:
break;
}
return 0;
}
static void ser_usb_l1_pause_trx_done(struct ser_obj *pser)
{
rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_RCVY_STEP_1);
ser_send_m2_event(pser);
}
static int ser_usb_l1_pause_trx_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
struct phl_hci_trx_ops *ops = pser->phl_info->hci_trx_ops;
struct phl_msg msg = {0};
enum rtw_hal_status status = RTW_HAL_STATUS_FAILURE;
switch (event) {
case FSM_EV_STATE_IN:
phl_ser_set_status(pser, SER_L1);
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_SER);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_SER_L1);
phl_msg_hub_send(pser->phl_info, NULL, &msg);
ops->req_tx_stop(pser->phl_info);
rtw_phl_tx_req_notify(pser->phl_info);
#if 0
/* not request pause PHL Rx in USB data path */
ops->req_rx_stop(pser->phl_info);
rtw_phl_start_rx_process(pser->phl_info);
#endif
if (false == ops->is_tx_pause(pser->phl_info)
#if 0
|| false == ops->is_rx_pause(pser->phl_info)
#endif
) {
phl_fsm_set_alarm(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_ACT, SER_EV_POLL_IO_EXPIRE);
pser->poll_io_times = SER_USB_POLL_IO_TIMES;
break;
}
ser_usb_l1_pause_trx_done(pser);
#ifdef RTW_WKARD_SER_USB_POLLING_EVENT
phl_fsm_set_alarm_ext(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_ACT, SER_EV_POLL_USB_INT_EXPIRE, 1, NULL);
#endif
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
#ifdef RTW_WKARD_SER_USB_POLLING_EVENT
case SER_EV_POLL_USB_INT_EXPIRE:
ser_usb_chk_int_event(pser);
phl_fsm_set_alarm_ext(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_ACT, SER_EV_POLL_USB_INT_EXPIRE, 1, NULL);
break;
#endif
case SER_EV_M3_DO_RECOVERY:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L1_DO_HCI);
break;
case SER_EV_FW_TIMER_EXPIRE:
status = rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2);
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
if (status != RTW_HAL_STATUS_SUCCESS)
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
else
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case SER_EV_M9_L2_RESET:
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
case SER_EV_POLL_IO_EXPIRE:
if (pser->poll_io_times-- <= 0) {
#ifdef RTW_WKARD_SER_L1_EXPIRE
ser_usb_l1_pause_trx_done(pser);
phl_fsm_set_alarm_ext(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_ACT, SER_EV_POLL_USB_INT_EXPIRE, 1, NULL);
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
#else
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
#endif
break;
}
if (false == ops->is_tx_pause(pser->phl_info)
#if 0
|| false == ops->is_rx_pause(pser->phl_info)
#endif
) {
phl_fsm_set_alarm(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_ACT, SER_EV_POLL_IO_EXPIRE);
break;
}
ser_usb_l1_pause_trx_done(pser);
phl_fsm_set_alarm_ext(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_ACT, SER_EV_POLL_USB_INT_EXPIRE, 1, NULL);
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
case FSM_EV_CANCEL:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pser->fsm_obj);
phl_fsm_cancel_alarm_ext(pser->fsm_obj, 1);
break;
default:
break;
}
return 0;
}
static int ser_sdio_l1_pause_trx_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
struct phl_hci_trx_ops *ops = pser->phl_info->hci_trx_ops;
struct phl_msg msg = {0};
enum rtw_hal_status status = RTW_HAL_STATUS_FAILURE;
switch (event) {
case FSM_EV_STATE_IN:
		phl_ser_set_status(pser, SER_L1);
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_SER);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_SER_L1);
phl_msg_hub_send(pser->phl_info, NULL, &msg);
ops->req_tx_stop(pser->phl_info);
rtw_phl_tx_req_notify(pser->phl_info);
ops->req_rx_stop(pser->phl_info);
rtw_phl_start_rx_process(pser->phl_info);
if (false == ops->is_tx_pause(pser->phl_info)
|| false == ops->is_rx_pause(pser->phl_info)) {
phl_fsm_set_alarm(pser->fsm_obj,
SER_POLLING_INTERVAL, SER_EV_POLL_IO_EXPIRE);
pser->poll_io_times = SER_POLL_IO_TIMES;
break;
}
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_DIS_HCI_INT);
if (rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_RCVY_STEP_1)
!= RTW_HAL_STATUS_SUCCESS) {
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
}
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_SER_HANDSHAKE_MODE);
ser_send_m2_event(pser);
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
case SER_EV_CHK_SER_EVENT:
if (true == rtw_hal_recognize_halt_c2h_interrupt(pser->phl_info->hal))
_ser_event_notify(pser->phl_info, NULL);
break;
case SER_EV_M3_DO_RECOVERY:
rtw_hal_clear_interrupt(pser->phl_info->hal);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L1_DO_HCI);
break;
case SER_EV_FW_TIMER_EXPIRE:
status = rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2);
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
if (status != RTW_HAL_STATUS_SUCCESS) {
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
} else {
			phl_ser_clear_status(pser, SER_L1);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
}
break;
case SER_EV_M9_L2_RESET:
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
case SER_EV_POLL_IO_EXPIRE:
if (pser->poll_io_times-- <= 0) {
#ifdef RTW_WKARD_SER_L1_EXPIRE
if (rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_RCVY_STEP_1)
!= RTW_HAL_STATUS_SUCCESS) {
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
}
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_SER_HANDSHAKE_MODE);
ser_send_m2_event(pser);
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
#else
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
#endif
break;
}
if (false == ops->is_tx_pause(pser->phl_info) ||
false == ops->is_rx_pause(pser->phl_info)) {
phl_fsm_set_alarm(pser->fsm_obj,
SER_POLLING_INTERVAL, SER_EV_POLL_IO_EXPIRE);
break;
}
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_DIS_HCI_INT);
if (rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_RCVY_STEP_1)
!= RTW_HAL_STATUS_SUCCESS) {
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
}
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_SER_HANDSHAKE_MODE);
ser_send_m2_event(pser);
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
case FSM_EV_CANCEL:
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pser->fsm_obj);
break;
default:
break;
}
return 0;
}
static int ser_pci_l1_do_hci_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
switch (event) {
case FSM_EV_STATE_IN:
if(rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2)
!= RTW_HAL_STATUS_SUCCESS){
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
}
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
/* pci: send M4 event */
ser_send_m4_event(pser);
/* pci: set timeout to wait M5 */
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
case SER_EV_M5_READY:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L1_RESUME_TRX);
break;
case SER_EV_FW_TIMER_EXPIRE:
/* PHL_INFO("ser_pci_l1_do_hci_st_hdl(): SER_EV_FW_TIMER_EXPIRE \n"); */
/* _ser_event_notify(pser->phl_info, NULL); */
/* phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2); */
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
phl_ser_clear_status(pser, SER_L1);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case SER_EV_M9_L2_RESET:
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
case FSM_EV_CANCEL:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pser->fsm_obj);
break;
default:
break;
}
return 0;
}
static int ser_usb_l1_do_hci_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
switch (event) {
case FSM_EV_STATE_IN:
if (rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2)
!= RTW_HAL_STATUS_SUCCESS) {
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
}
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
ser_send_m4_event(pser);
#ifdef RTW_WKARD_SER_USB_POLLING_EVENT
phl_fsm_set_alarm_ext(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_ACT, SER_EV_POLL_USB_INT_EXPIRE, 1, NULL);
#endif
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
#ifdef RTW_WKARD_SER_USB_POLLING_EVENT
case SER_EV_POLL_USB_INT_EXPIRE:
ser_usb_chk_int_event(pser);
phl_fsm_set_alarm_ext(pser->fsm_obj,
SER_USB_POLLING_INTERVAL_ACT, SER_EV_POLL_USB_INT_EXPIRE, 1, NULL);
break;
#endif
case SER_EV_M5_READY:
case SER_EV_FW_TIMER_EXPIRE:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L1_RESUME_TRX);
break;
case SER_EV_M9_L2_RESET:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
case FSM_EV_CANCEL:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pser->fsm_obj);
phl_fsm_cancel_alarm_ext(pser->fsm_obj, 1);
break;
default:
break;
}
return 0;
}
static int ser_sdio_l1_do_hci_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
switch (event) {
case FSM_EV_STATE_IN:
if (rtw_hal_lv1_rcvy(pser->phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2)
!= RTW_HAL_STATUS_SUCCESS) {
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
}
ser_resume_trx_process(pser, PHL_CTRL_RX);
rtw_phl_start_rx_process(pser->phl_info);
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_SER_HANDSHAKE_MODE);
ser_send_m4_event(pser);
phl_fsm_set_alarm(pser->fsm_obj, SER_FW_TIMEOUT,
SER_EV_FW_TIMER_EXPIRE);
break;
case SER_EV_CHK_SER_EVENT:
if (true == rtw_hal_recognize_halt_c2h_interrupt(pser->phl_info->hal))
_ser_event_notify(pser->phl_info, NULL);
break;
case SER_EV_M5_READY:
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L1_RESUME_TRX);
break;
case SER_EV_FW_TIMER_EXPIRE:
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case SER_EV_M9_L2_RESET:
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_L2);
break;
case FSM_EV_CANCEL:
rtw_hal_config_interrupt(pser->phl_info->hal, RTW_PHL_EN_HCI_INT);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pser->fsm_obj);
break;
default:
break;
}
return 0;
}
static int ser_l1_resume_trx_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
switch (event) {
case FSM_EV_STATE_IN:
/* TODO resume TRX process */
ser_resume_trx_process(pser, PHL_CTRL_TX);
rtw_phl_tx_req_notify(pser->phl_info);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_CANCEL:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pser->fsm_obj);
phl_ser_clear_status(pser, SER_L1);
break;
default:
break;
}
return 0;
}
static int ser_l2_st_hdl(void *obj, u16 event, void *param)
{
struct ser_obj *pser = (struct ser_obj *)obj;
struct phl_msg msg = {0};
switch (event) {
case FSM_EV_STATE_IN:
/*To avoid fw watchdog intr trigger periodically*/
rtw_hal_ser_reset_wdt_intr(pser->phl_info->hal);
/* TODO do something */
phl_ser_clear_status(pser, SER_L1);
phl_ser_set_status(pser, SER_L2);
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_SER);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_SER_L2);
phl_msg_hub_send(pser->phl_info, NULL, &msg);
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_TIMER_EXPIRE:
break;
case FSM_EV_CANCEL:
phl_fsm_state_goto(pser->fsm_obj, SER_ST_IDLE);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(pser->fsm_obj);
break;
default:
break;
}
return 0;
}
static void ser_dump_obj(void *obj, char *s, int *sz)
{
/* nothing to do for now */
}
static void ser_dump_fsm(void *fsm, char *s, int *sz)
{
/* nothing to do for now */
}
/* For EXTERNAL application to create a ser FSM */
/* @root: FSM root structure
* @phl_info: private data structure to invoke hal/phl function
*
* return
* fsm_main: FSM main structure (Do NOT expose)
*/
struct fsm_main *phl_ser_new_fsm(struct fsm_root *root,
struct phl_info_t *phl_info)
{
void *d = phl_to_drvpriv(phl_info);
struct fsm_main *fsm = NULL;
struct rtw_phl_fsm_tb tb;
_os_mem_set(d, &tb, 0, sizeof(tb));
if (phl_info->phl_com->hci_type == RTW_HCI_PCIE)
tb.state_tbl = ser_pci_state_tbl;
else if (phl_info->phl_com->hci_type == RTW_HCI_USB)
tb.state_tbl = ser_usb_state_tbl;
else if (phl_info->phl_com->hci_type == RTW_HCI_SDIO)
tb.state_tbl = ser_sdio_state_tbl;
else
return NULL;
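	/*
	 * Note: max_state is taken from ser_pci_state_tbl; this assumes the
	 * USB and SDIO state tables have the same number of entries.
	 */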
tb.max_state = sizeof(ser_pci_state_tbl)/sizeof(ser_pci_state_tbl[0]);
tb.max_event = sizeof(ser_event_tbl)/sizeof(ser_event_tbl[0]);
tb.evt_tbl = ser_event_tbl;
tb.dump_obj = ser_dump_obj;
	tb.dump_fsm = ser_dump_fsm;
tb.dbg_level = FSM_DBG_INFO;
tb.evt_level = FSM_DBG_INFO;
fsm = phl_fsm_init_fsm(root, "ser", phl_info, &tb);
return fsm;
}
/* For EXTERNAL application to destroy the ser fsm */
/* @fsm: see fsm_main
*/
void phl_ser_destory_fsm(struct fsm_main *fsm)
{
if (fsm == NULL)
return;
	/* deinit fsm local variables if any */
	/* call FSM framework to deinit fsm */
phl_fsm_deinit_fsm(fsm);
}
/* For EXTERNAL application to create SER object */
/* @fsm: FSM main structure which created by phl_ser_new_fsm()
* @phl_info: private data structure to invoke hal/phl function
*
* return
* ser_obj: structure of SER object (Do NOT expose)
*/
struct ser_obj *phl_ser_new_obj(struct fsm_main *fsm,
struct phl_info_t *phl_info)
{
void *d = phl_to_drvpriv(phl_info);
struct fsm_obj *obj;
struct ser_obj *pser;
pser = phl_fsm_new_obj(fsm, (void **)&obj, sizeof(*pser));
if (pser == NULL) {
/* TODO free fsm; currently will be freed in deinit process */
FSM_ERR(fsm, "ser: malloc obj fail\n");
return NULL;
}
pser->fsm = fsm;
pser->fsm_obj = obj;
pser->phl_info = phl_info;
/* init local use variable */
_os_spinlock_init(d, &pser->state_lock);
return pser;
}
/* For EXTERNAL application to destroy the ser object */
/* @pser: local created command object
*
*/
void phl_ser_destory_obj(struct ser_obj *pser)
{
void *d;
if (pser == NULL)
return;
d = phl_to_drvpriv(pser->phl_info);
	/* deinit obj local variables if any */
_os_spinlock_free(d, &pser->state_lock);
	/* inform FSM framework to recycle fsm_obj */
phl_fsm_destory_obj(pser->fsm_obj);
}
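/*
 * Illustrative bring-up/tear-down order for the constructors/destructors
 * above (not from the original source; error handling omitted):
 *	struct fsm_main *fsm = phl_ser_new_fsm(root, phl_info);
 *	struct ser_obj *pser = phl_ser_new_obj(fsm, phl_info);
 *	phl_info->ser_obj = pser;
 *	...
 *	phl_ser_destory_obj(pser);
 *	phl_ser_destory_fsm(fsm);
 */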
/* For EXTERNAL application to stop ser service (expose) */
/* @phl: phl_info_t; the pending ser job will be cancelled
 */
enum rtw_phl_status phl_ser_cancel(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct ser_obj *pser = phl_info->ser_obj;
return phl_fsm_gen_msg(phl, pser->fsm_obj, NULL, 0, FSM_EV_CANCEL);
}
/* For EXTERNAL interrupt handler to send event into ser fsm (expose) */
enum rtw_phl_status phl_ser_event_to_fw(void *phl, u32 err)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_hal_status status;
	status = rtw_hal_ser_set_error_status(phl_info->hal, err);
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "phl_ser_event_to_fw err %d, status 0x%x\n", err, status);
return status;
}
/* For EXTERNAL interrupt handler and dump fw ple (expose) */
enum rtw_phl_status rtw_phl_ser_dump_ple_buffer(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "rtw_phl_ser_dump_ple_buffer\n");
rtw_hal_dump_fw_rsvd_ple(phl_info->hal);
return RTW_PHL_STATUS_SUCCESS;
}
/* For EXTERNAL use to report a FW watchdog timeout and trigger an L2 reset (expose) */
enum rtw_phl_status phl_fw_watchdog_timeout_notify(void *phl)
{
enum RTW_PHL_SER_NOTIFY_EVENT notify = RTW_PHL_SER_L2_RESET;
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "phl_fw_watchdog_timeout_notify triggle L2 Reset !!!\n");
return phl_ser_send_msg(phl, notify);
}
enum rtw_phl_status rtw_phl_ser_l2_notify(struct rtw_phl_com_t *phl_com)
{
enum RTW_PHL_SER_NOTIFY_EVENT notify = RTW_PHL_SER_L2_RESET;
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "rtw_phl_ser_l2_notify triggle L2 Reset !!!\n");
return phl_ser_send_msg(phl_com->phl_priv, notify);
}
/* @phl: phl_info_t
* @notify: event to ser fsm
*/
#ifndef CONFIG_PHL_CMD_SER
enum rtw_phl_status phl_ser_send_msg(void *phl,
enum RTW_PHL_SER_NOTIFY_EVENT notify)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct ser_obj *pser = phl_info->ser_obj;
u16 event;
switch (notify) {
case RTW_PHL_SER_PAUSE_TRX: /* M1 */
event = SER_EV_L1_START;
break;
case RTW_PHL_SER_DO_RECOVERY: /* M3 */
event = SER_EV_M3_DO_RECOVERY;
break;
case RTW_PHL_SER_READY: /* M5 */
event = SER_EV_M5_READY;
break;
case RTW_PHL_SER_L2_RESET: /* M9 */
event = SER_EV_M9_L2_RESET;
break;
case RTW_PHL_SER_EVENT_CHK:
event = SER_EV_CHK_SER_EVENT;
break;
case RTW_PHL_SER_L0_RESET:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "phl_ser_send_msg(): Unsupported case:%d, please check it\n",
notify);
return RTW_PHL_STATUS_FAILURE;
default:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "phl_ser_send_msg(): Unrecognize case:%d, please check it\n",
notify);
return RTW_PHL_STATUS_FAILURE;
}
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "phl_ser_send_msg event %d\n", event);
return phl_fsm_gen_msg(phl, pser->fsm_obj, NULL, 0, event);
}
/* For EXTERNAL application to query whether a SER reset is in progress (expose) */
/* @phl: phl_info_t
 * return: bitmap of SER_L1/SER_L2 resets currently in progress
 */
u8 phl_ser_inprogress(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct ser_obj *pser = phl_info->ser_obj;
u8 reset_type = 0;
void *d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &pser->state_lock, _bh, NULL);
if(pser->trigger_l1_reset == true) {
reset_type |= SER_L1;
//pser->trigger_l2_reset = false;
}
if(pser->trigger_l2_reset == true) {
reset_type |= SER_L2;
}
_os_spinunlock(d, &pser->state_lock, _bh, NULL);
return reset_type;
}
#endif
/* For EXTERNAL application to clear SER status bits (expose) */
/* @pser: ser object
 * @serstatus: bitmap of SER_L1/SER_L2 status bits to clear
 */
void phl_ser_clear_status(struct ser_obj *pser, u32 serstatus)
{
void *d = phl_to_drvpriv(pser->phl_info);
_os_spinlock(d, &pser->state_lock, _bh, NULL);
if(serstatus & SER_L1) {
pser->trigger_l1_reset = false;
}
if(serstatus & SER_L2) {
pser->trigger_l2_reset = false;
}
_os_spinunlock(d, &pser->state_lock, _bh, NULL);
}
void phl_ser_set_status(struct ser_obj *pser, u32 serstatus)
{
void *d = phl_to_drvpriv(pser->phl_info);
_os_spinlock(d, &pser->state_lock, _bh, NULL);
if(serstatus & SER_L1) {
pser->trigger_l1_reset = true;
}
if(serstatus & SER_L2) {
pser->trigger_l2_reset = true;
}
_os_spinunlock(d, &pser->state_lock, _bh, NULL);
}
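/*
 * Illustrative use of the status bitmap (not from the original source):
 * a caller can query which resets are pending and clear them once handled:
 *	u8 st = rtw_phl_ser_inprogress(phl);
 *	if (st & SER_L2)
 *		... (hand the full L2 recovery to the core layer)
 *	rtw_phl_ser_clear_status(phl, st);
 */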
/* For EXTERNAL application to notify the watchdog status from the upper layer */
void phl_ser_notify_from_upper_watchdog_status(void *phl, bool inprogress)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct ser_obj *pser = phl_info->ser_obj;
pser->dynamicThredRunning = inprogress;
}
void rtw_phl_notify_watchdog_status(void *phl, bool inprogress)
{
phl_ser_notify_from_upper_watchdog_status(phl, inprogress);
}
#endif /*CONFIG_FSM*/
|
2301_81045437/rtl8852be
|
phl/phl_ser_fsm.c
|
C
|
agpl-3.0
| 35,569
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_SER_FSM_H__
#define __PHL_SER_FSM_H__
struct fsm_main *phl_ser_new_fsm(struct fsm_root *root,
struct phl_info_t *phl_info);
void phl_ser_destory_fsm(struct fsm_main *fsm);
struct ser_obj *phl_ser_new_obj(struct fsm_main *fsm,
struct phl_info_t *phl_info);
void phl_ser_destory_obj(struct ser_obj *pser);
enum rtw_phl_status phl_ser_cancel(void *phl);
enum rtw_phl_status phl_ser_event_to_fw(void *phl, u32 err);
enum rtw_phl_status rtw_phl_ser_dump_ple_buffer(void *phl);
enum rtw_phl_status phl_fw_watchdog_timeout_notify(void *phl);
enum rtw_phl_status
phl_ser_send_msg(void *phl, enum RTW_PHL_SER_NOTIFY_EVENT notify);
u8 phl_ser_inprogress(void *phl);
void phl_ser_clear_status(struct ser_obj *pser, u32 serstatus);
void phl_ser_set_status(struct ser_obj *pser, u32 serstatus);
void phl_ser_notify_from_upper_watchdog_status(void *phl, bool inprogress);
u8 rtw_phl_ser_inprogress(void *phl);
void rtw_phl_ser_clear_status(void *phl, u32 serstatus);
void rtw_phl_notify_watchdog_status(void *phl, bool inprogress);
#endif /* __PHL_SER_FSM_H__ */
|
2301_81045437/rtl8852be
|
phl/phl_ser_fsm.h
|
C
|
agpl-3.0
| 1,726
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
void __reset_snd_grp(struct phl_snd_grp *grp)
{
u8 i = 0;
grp->snd_type = PHL_SND_TYPE_INVALID;
grp->band = 0;
grp->num_sta = 0;
grp->wrole_idx = 0;
grp->grp_tier = PHL_SND_GRP_TIER_1;
grp->snd_sts = PHL_SND_STS_PENDING;
for (i = 0; i < MAX_NUM_STA_SND_GRP; i++) {
grp->sta[i].valid = false;
grp->sta[i].macid = 0;
grp->sta[i].bw = CHANNEL_WIDTH_20;
grp->sta[i].snd_fb_t = PHL_SND_FB_TYPE_SU;
grp->sta[i].npda_sta_info = 0;
grp->sta[i].bf_entry = NULL;
grp->sta[i].snd_sts = PHL_SND_STS_PENDING;
}
}
enum rtw_phl_status _phl_snd_init_snd_grp(
struct phl_info_t *phl_info)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_sound_param *param = &snd->snd_param;
u8 i = 0;
do {
if (param->snd_grp == NULL) {
status = RTW_PHL_STATUS_FAILURE;
break;
}
for (i = 0; i < MAX_SND_GRP_NUM; i++) {
			__reset_snd_grp(&param->snd_grp[i]);
param->snd_grp[i].gidx = i;
}
} while (0);
return status;
}
#ifdef CONFIG_FSM
/* For EXTERNAL application to create Sound object */
/* @fsm: FSM main structure which created by phl_snd_new_fsm()
* @phl_info: private data structure to invoke hal/phl function
*
* return
*/
enum rtw_phl_status phl_snd_new_obj(
struct fsm_main *fsm,
struct phl_info_t *phl_info)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_sound_obj *snd_obj = NULL;
struct fsm_obj *obj = NULL;
void *drv_priv = phl_to_drvpriv(phl_info);
FUNCIN();
do {
snd_obj = phl_fsm_new_obj(
fsm, (void **)&obj, sizeof(*snd_obj));
if (snd_obj == NULL) {
status = RTW_PHL_STATUS_RESOURCE;
break;
}
phl_info->snd_obj = snd_obj;
snd_obj->fsm = fsm;
snd_obj->fsm_obj = obj;
snd_obj->phl_info = phl_info;
/*Init the snd group static resources here*/
status = _phl_snd_init_snd_grp(phl_info);
/* init obj local use variable */
PHL_INFO("snd_fsm_func_init_st_hdl : PHL SND FSM Module Start Work\n");
_os_spinlock_init(drv_priv, &snd_obj->snd_lock);
_os_spinlock_init(drv_priv, &snd_obj->cmd_lock);
phl_snd_func_snd_init(snd_obj->phl_info);
} while (0);
if (RTW_PHL_STATUS_SUCCESS != status) {
PHL_ERR("phl_snd_init_obj FAIL\n");
		/* the phl fsm module will handle freeing the phl-fsm-related objects */
/* phl_snd_deinit_obj(phl_info); */
}
FUNCOUT();
return status;
}
#endif
/* PHL SOUND EXTERNAL APIs */
/* get sounding in progress */
u8 rtw_phl_snd_chk_in_progress(void *phl)
{
u8 ret = 0;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
void *d = phl_to_drvpriv(phl_info);
_os_spinlock(d, &snd->snd_lock, _bh, NULL);
ret = snd->snd_in_progress;
_os_spinunlock(d, &snd->snd_lock, _bh, NULL);
return ret;
}
/**
* rtw_phl_sound_start
* @phl:(struct phl_info_t *)
* @st_dlg_tkn: start dialog token value, if 0, it will use previous sounding dialog token;
* @period: sounding process period (group--> next group)
* @test_flag: test mode flags
**/
enum rtw_phl_status
rtw_phl_sound_start(void *phl, u8 wrole_idx, u8 st_dlg_tkn, u8 period, u8 test_flag)
{
#ifdef CONFIG_FSM
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_snd_start_req snd_req;
snd_req.wrole = (void *)phl_get_wrole_by_ridx(phl_info, wrole_idx);
snd_req.dialog_token = (st_dlg_tkn == 0) ?
snd->snd_param.snd_dialog_token : st_dlg_tkn;
snd_req.proc_timeout_ms = SND_PROC_DEFAULT_TIMEOUT; /* Default Value */
snd_req.proc_period = (period > SND_PROC_DEFAULT_PERIOD) ?
SND_PROC_DEFAULT_PERIOD : period; /*MAX = Default Value */
snd_req.test_flag = test_flag;
if (test_flag&PHL_SND_TEST_F_PASS_STS_CHK)
snd_req.bypass_sts_chk = true;
else
snd_req.bypass_sts_chk = false; /* Default False */
return phl_snd_fsm_ev_start_func(phl, &snd_req);
#else
return RTW_PHL_STATUS_FAILURE;
#endif
}
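/*
 * Illustrative call (not from the original source):
 *	rtw_phl_sound_start(phl, 0, 0, SND_PROC_DEFAULT_PERIOD, 0);
 * starts sounding on wifi role index 0, reuses the previously stored
 * dialog token (st_dlg_tkn == 0), uses the default per-group period and
 * sets no test flags.
 */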
enum rtw_phl_status
rtw_phl_sound_down_ev(void *phl)
{
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
#ifdef CONFIG_FSM
status = phl_snd_fsm_ev_c2h_snd_down(phl);
#else
status = RTW_PHL_STATUS_FAILURE;
#endif
return status;
}
enum rtw_phl_status
rtw_phl_sound_abort(void *phl)
{
#ifdef CONFIG_FSM
return phl_snd_fsm_ev_abort(phl);
#else
return RTW_PHL_STATUS_FAILURE;
#endif
}
/* set fixed mode parameters APIs*/
void rtw_phl_snd_dump_fix_para(struct phl_info_t *phl_info)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_snd_fix_param *para = NULL;
u8 i = 0;
para = &snd->snd_param.fix_param;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "===> rtw_phl_snd_fix_dump_para \n");
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "test_flag = 0x%x \n", snd->snd_param.test_flag);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "en_fix_gidx = %d \n", para->en_fix_gidx ? 1 : 0);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "en_fix_fb_type = %d \n", para->en_fix_fb_type ? 1 : 0);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "en_fix_sta = %d \n", para->en_fix_sta ? 1 : 0);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "en_fix_snd_bw = %d \n", para->en_fix_snd_bw ? 1 : 0);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "grp_idx = %d \n", para->grp_idx);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "snd_fb_type = %d \n", para->snd_fb_type);
for (i = 0; i < MAX_NUM_STA_SND_GRP; i++) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "sta_macid[i] = 0x%x \n", para->sta_macid[i]);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "bw[i] = %d \n",para->bw[i]);
}
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "<=== rtw_phl_snd_fix_dump_para \n");
}
/* fixed group idx */
void rtw_phl_snd_fix_gidx(struct phl_info_t *phl_info, bool en, u8 gidx)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "rtw_phl_snd_fix_gidx() set sounding gidx = 0x%x\n", gidx);
if (en) {
snd->snd_param.fix_param.en_fix_gidx = 1;
snd->snd_param.fix_param.grp_idx = gidx;
} else {
snd->snd_param.fix_param.en_fix_gidx = 0;
}
}
/* fixed snd feedback type */
void rtw_phl_snd_fix_snd_fb_type(struct phl_info_t *phl_info,
bool en, enum snd_fb_type fb_type)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "rtw_phl_snd_fix_gidx() set sounding fb_type = 0x%x\n",
fb_type);
if (en) {
snd->snd_param.fix_param.en_fix_fb_type = 1;
snd->snd_param.fix_param.snd_fb_type = fb_type;
} else {
snd->snd_param.fix_param.en_fix_fb_type = 0;
}
}
/* fixed sounding sta macids */
void rtw_phl_snd_fix_set_sta(struct phl_info_t *phl_info,
bool en, u8 sidx, u16 macid)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "rtw_phl_snd_fix_set_sta() set sta[%d] macid = 0x%x\n",
sidx, macid);
if (en) {
snd->snd_param.fix_param.en_fix_sta = 1;
if (sidx < MAX_NUM_STA_SND_GRP)
snd->snd_param.fix_param.sta_macid[sidx] = macid;
else
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "ERROR, sidx >= 4\n");
} else {
snd->snd_param.fix_param.en_fix_sta = 0;
}
}
/* fixed sounding sta bw */
void rtw_phl_snd_fix_set_bw(struct phl_info_t *phl_info,
bool en, u8 sidx, enum channel_width bw)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "rtw_phl_snd_fix_set_bw() set sta[%d] bw = 0x%x\n", sidx, bw);
if (en) {
snd->snd_param.fix_param.en_fix_snd_bw = 1;
if (sidx < MAX_NUM_STA_SND_GRP)
snd->snd_param.fix_param.bw[sidx] = bw;
else
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "ERROR, sidx >= 4\n");
} else {
snd->snd_param.fix_param.en_fix_snd_bw = 0;
}
}
/* set forced fw tx mu-mimo (forced fw tx decision) */
void rtw_phl_snd_fix_tx_he_mu(struct phl_info_t *phl_info, u8 gid, bool en)
{
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "rtw_phl_snd_fix_tx_mu_para()\n");
rtw_hal_bf_set_txmu_para(phl_info->hal, gid, en,
HAL_PROT_NO_PROETCT, HAL_ACK_N_USER_BA);
rtw_hal_bf_set_fix_mode(phl_info->hal, gid, en);
}
/* PHL SOUND INTERNAL APIs */
/* SND FUNC */
enum rtw_phl_status
phl_snd_func_snd_init(struct phl_info_t *phl_info)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
void *d = phl_to_drvpriv(phl_info);
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
u8 f_ru_tbl_80m[MAX_SND_HE_BFRP_USER_NUM][MAX_SND_HE_BFRP_USER_NUM] = {
{RTW_HE_RU996_1, RTW_HE_RU996_1, RTW_HE_RU996_1 ,RTW_HE_RU996_1},
{RTW_HE_RU484_1, RTW_HE_RU484_2, RTW_HE_RU996_1 ,RTW_HE_RU996_1},
{RTW_HE_RU484_1, RTW_HE_RU242_3, RTW_HE_RU242_4 ,RTW_HE_RU996_1},
{RTW_HE_RU242_1, RTW_HE_RU242_2, RTW_HE_RU242_3 ,RTW_HE_RU242_4}
};
u8 f_ru_tbl_20m[MAX_SND_HE_BFRP_USER_NUM][MAX_SND_HE_BFRP_USER_NUM] = {
{RTW_HE_RU242_1, RTW_HE_RU242_1, RTW_HE_RU242_1, RTW_HE_RU242_1},
{RTW_HE_RU106_1, RTW_HE_RU106_1, RTW_HE_RU242_1, RTW_HE_RU242_1},
{RTW_HE_RU106_1, RTW_HE_RU52_3, RTW_HE_RU52_4, RTW_HE_RU242_1},
{RTW_HE_RU52_1, RTW_HE_RU52_2, RTW_HE_RU52_3, RTW_HE_RU52_4}
};
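	/*
	 * Note (derived from _phl_snd_proc_fw_cmd_he_tb_*sta below): row n of
	 * f_ru_tbl_20m/f_ru_tbl_80m holds the fixed RU allocation used when
	 * (n + 1) STAs share one BFRP, e.g. row 0 gives the whole bandwidth to
	 * a single STA while row 3 splits it into four RUs.
	 */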
/* Add Other Sounding FUNC/PRCO Initialization Here */
snd->snd_param.snd_proc_timeout_ms = SND_PROC_DEFAULT_TIMEOUT;/* ms */
snd->snd_param.cur_proc_grp_idx = 0;
snd->snd_param.pre_proc_grp_idx = 0;
snd->snd_param.snd_dialog_token = 1;
snd->snd_param.snd_func_grp_num = 0;
snd->snd_param.grp_used_map = 0;
snd->snd_param.snd_proc_period = SND_PROC_DEFAULT_PERIOD;
snd->snd_param.snd_fail_counter = 0;
/*fixed_ru_tbl*/
_os_mem_cpy(d, snd->snd_param.fix_param.f_ru_tbl_20, f_ru_tbl_20m,
MAX_SND_HE_BFRP_USER_NUM * MAX_SND_HE_BFRP_USER_NUM);
_os_mem_cpy(d, snd->snd_param.fix_param.f_ru_tbl_80, f_ru_tbl_80m,
MAX_SND_HE_BFRP_USER_NUM * MAX_SND_HE_BFRP_USER_NUM);
return pstatus;
}
enum rtw_phl_status
phl_snd_func_pre_config(struct phl_info_t *phl_info)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_sound_param *snd_param = &snd->snd_param;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
void *d = phl_to_drvpriv(phl_info);
snd_param->proc_start_time = _os_get_cur_time_ms();
snd_param->cur_proc_grp_idx = 0; /* default start from group idx 0 */
snd_param->pre_proc_grp_idx = 0;
_os_spinlock(d, &snd->snd_lock, _bh, NULL);
snd->is_terminated = 0;
snd->snd_in_progress = 1;
_os_spinunlock(d, &snd->snd_lock, _bh, NULL);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "PHL SND FUNC Start with SND Dialog Token = 0x%x\n",
snd_param->snd_dialog_token);
return pstatus;
}
/* SND_FUNC : GROUP related */
/**
 * phl_snd_get_grp_byidx()
 * get the group (struct phl_snd_grp *) by group index.
* input:
* @gidx: group idx.
* return:
* @grp: (struct phl_snd_grp *grp), NULL = FAIL;
*/
struct phl_snd_grp *
phl_snd_get_grp_byidx(struct phl_info_t *phl_info, u8 gidx)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_sound_param *snd_param = &snd->snd_param;
struct phl_snd_grp *grp = NULL;
do {
if (gidx >= MAX_SND_GRP_NUM)
break;
if (!(snd_param->grp_used_map & BIT(gidx)))
break;
if (0 == snd_param->snd_grp[gidx].num_sta)
break;
if (PHL_SND_TYPE_INVALID == snd_param->snd_grp[gidx].snd_type)
break;
grp = &snd_param->snd_grp[gidx];
} while (0);
return grp;
}
/**
* phl_snd_func_remove_grp()
* remove the target sounding grp from sound process;
* input:
* @grp: (struct phl_snd_grp *) target sounding grp,
*/
enum rtw_phl_status
phl_snd_func_remove_grp(struct phl_info_t *phl_info, struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_sound_param *snd_param = &snd->snd_param;
if (grp == NULL) {
return pstatus;
}
if (snd_param->grp_used_map & BIT(grp->gidx)) {
/* Check and Release all the BF resource */
pstatus = phl_snd_proc_release_res(phl_info, grp);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "PHL SND Remove Grp : release BF resouce fail\n");
}
/* Reset group content to default value */
__reset_snd_grp(grp);
/* Clear Group BIT */
snd_param->grp_used_map &= ~BIT(grp->gidx);
snd_param->snd_func_grp_num--;
} else {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "PHL SND Remove Grp : Group is not in used!!!\n");
}
return pstatus;
}
/**
 * phl_snd_func_remove_grp_all()
 * remove all of the sounding groups from the sound process;
*/
void
phl_snd_func_remove_grp_all(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct phl_snd_grp *grp = NULL;
u8 idx = 0;
for(idx = 0; idx < MAX_SND_GRP_NUM; idx++) {
grp = phl_snd_get_grp_byidx(phl_info, idx);
if (grp != NULL) {
pstatus = phl_snd_func_remove_grp(phl_info, grp);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "Remove SND GRP[%d] Fail\n", idx);
}
}
}
}
/**
 * _phl_snd_get_available_gidx()
 * Get an available group resource and mark it as used in grp_used_map.
 * return:
 * @gidx: u8, the group idx in snd_param->grp[n]; MAX_SND_GRP_NUM if none is free
 */
u8 _phl_snd_get_available_gidx(struct phl_sound_obj *snd)
{
struct phl_sound_param *param = &snd->snd_param;
u8 gidx = MAX_SND_GRP_NUM;
for (gidx = 0; gidx < MAX_SND_GRP_NUM; gidx++) {
if (!(param->grp_used_map & BIT(gidx))) {
param->grp_used_map |= BIT(gidx);
break;
}
}
return gidx;
}
/**
* _phl_snd_func_grp_add_sta()
* Add the STA into sounding group.
* input:
* @sta: (struct rtw_phl_stainfo_t *) the target sta to be added.
* the function will use the macid / bw information in sta_info;
* @gidx: the group idx to add
*/
enum rtw_phl_status
_phl_snd_func_grp_add_sta(
struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta, u8 gidx)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_sound_param *snd_param = &snd->snd_param;
struct phl_snd_grp *grp = NULL;
u8 i = 0;
bool chk = false;
do {
if (NULL == sta) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "STA == NULL !!!!\n");
break;
}
if (gidx >= MAX_SND_GRP_NUM) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "Get SND Grp Resource Fail : gidx >= MAX_SND_GRP_NUM\n");
break;
}
grp = &snd_param->snd_grp[gidx];
/* check grp->sta[i].macid with sta->macid, skip it if same.*/
for (i = 0; i < grp->num_sta; i++) {
if(grp->sta[i].macid == sta->macid) {
chk = true;
break;
}
}
if (true == chk)
break;
if (grp->num_sta >= MAX_NUM_STA_SND_GRP) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "The SND Grp is already has 4 STAs\n");
break;
}
grp->sta[grp->num_sta].macid = sta->macid;
grp->sta[grp->num_sta].snd_sts = PHL_SND_STS_PENDING;
grp->sta[grp->num_sta].bw = sta->chandef.bw;
grp->sta[grp->num_sta].valid = true;
grp->num_sta++;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "sta bw = %d\n", sta->chandef.bw);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "grp->num_sta = %d\n", grp->num_sta);
pstatus = RTW_PHL_STATUS_SUCCESS;
} while (0);
return pstatus;
}
/**
* phl_snd_func_add_snd_grp :
* Add a Sounding Group with Primary STA for FW Sounding
* @phl_info: struct phl_info_t *
 * @he_snd: 1 = HE, 0 = VHT
 * @gidx: return value, snd group idx in the group list
 * @psta: (struct rtw_phl_stainfo_t *) Primary Sounding STA,
 * if pSTA is unavailable, SND PROC for this group will be terminated.
**/
enum rtw_phl_status
phl_snd_func_add_snd_grp(
struct phl_info_t *phl_info, bool he_snd,
u8 wrole_idx, struct rtw_phl_stainfo_t *psta, u8 *gidx)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_sound_param *snd_param = &snd->snd_param;
struct phl_snd_grp *grp = NULL;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
do {
/* Check Primary STA Available*/
if (psta == NULL) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "psta == NULL\n");
break;
}
/* Get available sounding group resource */
*gidx = _phl_snd_get_available_gidx(snd);
if (*gidx >= MAX_SND_GRP_NUM) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "Get SND Grp Resource Fail : gidx >= MAX_SND_GRP_NUM\n");
break;
}
grp = &(snd_param->snd_grp[*gidx]);
grp->band = psta->wrole->hw_band;
grp->snd_type = he_snd ? PHL_SND_TYPE_HE_SW :
PHL_SND_TYPE_VHT_SW;
grp->wrole_idx = wrole_idx;
grp->snd_sts = PHL_SND_STS_PENDING;
grp->num_sta = 0;
/* Primary STA use idx-0 */
_phl_snd_func_grp_add_sta(phl_info, psta, *gidx);
snd_param->snd_func_grp_num++;
pstatus = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "phl_snd_func_add_snd_grp : Add group[%d] Success\n",
*gidx);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "phl_snd_func_add_snd_grp : grp->snd_type 0x%x\n",
grp->snd_type);
} while (0);
return pstatus;
}
/**
* _phl_snd_func_set_grp_fb_mu()
* Set the whole sounding grp's feedback type = MU
* input:
* @grp: (struct phl_snd_grp *) the target group.
*/
void _phl_snd_func_set_grp_fb_mu(struct phl_snd_grp *grp)
{
u8 i = 0;
if (grp == NULL)
return;
for (i = 0; i < grp->num_sta; i++) {
grp->sta[i].snd_fb_t = PHL_SND_FB_TYPE_MU;
}
}
/**
* phl_snd_func_grouping()
 * function for sounding fsm state : SND_FUNC_READY
 * input:
 * @wroleidx: the index of the wrole under which the sounding proc runs.
*/
enum rtw_phl_status
phl_snd_func_grouping(struct phl_info_t *phl_info, u8 wroleidx)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_sound_param *snd_param = &snd->snd_param;
struct phl_snd_fix_param *fix_para = &snd->snd_param.fix_param;
struct rtw_wifi_role_t *wrole = NULL;
struct rtw_phl_stainfo_t *self = NULL, *sta;
struct phl_snd_grp *grp = NULL;
void *drv = phl_to_drvpriv(phl_info);
struct phl_queue *sta_queue;
u8 gidx = 0;
u8 cnt = 0;
wrole = phl_get_wrole_by_ridx(phl_info, wroleidx);
	/* if wrole(STA) is linked, self = AP */
/* if wrole is AP, self = ???? */
self = rtw_phl_get_stainfo_self(phl_info, wrole);
if (self == NULL) {
PHL_ERR("Cannot get self's phl_sta\n");
return pstatus;
}
sta_queue = &wrole->assoc_sta_queue;
if (PHL_RTYPE_STATION == wrole->type) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, " PHL_RTYPE_STATION == wrole->type \n");
/* STA Mode : Only SU TxBF with AP */
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "self->macid = 0x%x \n", self->macid);
debug_dump_mac_address(self->mac_addr);
pstatus = phl_snd_func_add_snd_grp(
phl_info,
(self->wmode & WLAN_MD_11AX) ? true :
false,
wrole->id, self, &gidx);
grp = &snd_param->snd_grp[gidx];
grp->grp_tier = PHL_SND_GRP_TIER_0;
grp->sta[0].snd_fb_t = PHL_SND_FB_TYPE_SU;
grp->snd_type = (self->wmode & WLAN_MD_11AX) ?
PHL_SND_TYPE_HE_HW : PHL_SND_TYPE_VHT_HW;
} else {
#if 1
		/* Test Code: Group-1: Forced MU Sounding with the first 1~4 STAs */
		/* the MU sounding list should come from the MU grouping module */
cnt = 0;
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
&wrole->assoc_sta_queue.queue, list) {
if (is_broadcast_mac_addr(sta->mac_addr))
continue;
if (sta == self)
continue;
/* First STA */
if (cnt == 0) {
pstatus = phl_snd_func_add_snd_grp(
phl_info,
(sta->wmode & WLAN_MD_11AX) ?
true : false,
wrole->id, sta, &gidx);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
} else {
/* get next associated sta and add to group */
_phl_snd_func_grp_add_sta(phl_info, sta, gidx);
}
cnt++;
if (cnt >= 4)
break;
}
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
if(pstatus != RTW_PHL_STATUS_SUCCESS)
return RTW_PHL_STATUS_FAILURE;
grp = &snd_param->snd_grp[gidx];
grp->grp_tier = PHL_SND_GRP_TIER_0;
/* Test : forced MU */
_phl_snd_func_set_grp_fb_mu(&snd_param->snd_grp[gidx]);
#endif
}
	/* TODO: fixed parameters gidx when multi-group */
if (snd_param->test_flag&PHL_SND_TEST_F_GRP_SND_PARA) {
/*Test Mode force set the group fb type = MU */
if (fix_para->en_fix_fb_type) {
if (PHL_SND_FB_TYPE_MU == fix_para->snd_fb_type) {
_phl_snd_func_set_grp_fb_mu(
&snd_param->snd_grp[gidx]);
}
/**
			 * Note: 8852A only supports two CSI buffers for SU;
			 * the number of STAs doing SU sounding in a group shall not exceed 2.
**/
}
if(fix_para->en_fix_snd_bw) {
grp = &snd_param->snd_grp[gidx];
for (cnt = 0; cnt < MAX_NUM_STA_SND_GRP; cnt++) {
if (grp->sta[cnt].valid)
grp->sta[cnt].bw = fix_para->bw[cnt];
}
}
} else {
grp = &snd_param->snd_grp[gidx];
if (grp->num_sta > 2) {
/* forced using MU feedback because of SU CSI buffer number */
_phl_snd_func_set_grp_fb_mu(&snd_param->snd_grp[gidx]);
}
}
if (snd_param->test_flag & PHL_SND_TEST_F_GRP_EN_BF_FIX) {
		snd_param->snd_grp[gidx].en_fix_mode = 1; /* post config forced mode setting */
}
return pstatus;
}
/* SND PROC */
/* Free BF/CQI resource */
enum rtw_phl_status
_phl_snd_proc_release_res_cqi(
struct phl_info_t *phl_info, struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
/*CQI Fb doesn't query any resource*/
return pstatus;
}
enum rtw_phl_status
_phl_snd_proc_release_res_bf(
struct phl_info_t *phl_info, struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
struct phl_snd_sta *snd_sta;
struct rtw_phl_stainfo_t *sta = NULL;
u8 idx = 0;
for (idx = 0; idx < grp->num_sta; idx++) {
snd_sta = &grp->sta[idx];
if(0 == snd_sta->valid)
continue;
sta = rtw_phl_get_stainfo_by_macid(
phl_info, snd_sta->macid);
if (NULL == sta) {
PHL_ERR("_phl_snd_proc_release_res_bf: Cannot find STA macid 0x%x in PHL STA Info List \n",
snd_sta->macid);
continue;
}
if (NULL == sta->hal_sta->bf_entry)
continue;
hstatus = rtw_hal_snd_release_proc_sta_res(phl_info->hal, sta);
if(hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("_phl_snd_proc_release_res_bf: macid 0x%x Free Sounding Resource FAIL \n",
snd_sta->macid);
continue;
}
		/* unlink the bf entry from the STA info */
sta->hal_sta->bf_entry = NULL;
}
return pstatus;
}
/**
* phl_snd_proc_release_res:
* Release the sounding resource for the group
* @phl_info: phl_info_t
 * @grp: (struct phl_snd_grp *) sounding group to release resources for
**/
enum rtw_phl_status
phl_snd_proc_release_res(struct phl_info_t *phl_info, struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct phl_snd_sta *snd_sta;
snd_sta = &grp->sta[0];
if (snd_sta->snd_fb_t == PHL_SND_FB_TYPE_CQI)
pstatus = _phl_snd_proc_release_res_cqi(phl_info, grp);
else
pstatus = _phl_snd_proc_release_res_bf(phl_info, grp);
return pstatus;
}
/**
 * _phl_snd_proc_get_res_cqi_fb:
 * CQI sounding doesn't need BF resources
 * @phl_info: phl_info_t
 * @grp: (struct phl_snd_grp *) sounding group
 * @nsta: return value: number of STAs that successfully acquired resources
**/
enum rtw_phl_status
_phl_snd_proc_get_res_cqi_fb(
struct phl_info_t *phl_info, struct phl_snd_grp *grp, u8 *nsta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct phl_snd_sta *snd_sta;
u8 idx = 0;
struct rtw_phl_stainfo_t *sta = NULL;
*nsta = 0;
for (idx = 0; idx < grp->num_sta; idx++) {
snd_sta = &grp->sta[idx];
sta = rtw_phl_get_stainfo_by_macid(phl_info, snd_sta->macid);
if (NULL == sta) {
PHL_ERR("phl_snd_proc_get_bf_res: Cannot find STA macid 0x%x in PHL STA Info List \n",
snd_sta->macid);
continue;
}
rtw_hal_snd_ndpa_sta_info_he(
sta,
&snd_sta->npda_sta_info,
snd_sta->bw,
PHL_SND_FB_TYPE_CQI);
(*nsta)++;
}
if (*nsta == 0) {
grp->snd_sts = PHL_SND_STS_FAILURE;
pstatus = RTW_PHL_STATUS_FAILURE;
}
if (*nsta != grp->num_sta) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, " Sounding STAs is fewer than group sta because of resource!");
}
return pstatus;
}
/**
* _phl_snd_proc_get_res_bf:
* Get BF Resource for SU/MU Sounding
* @phl_info: phl_info_t
 * @grp: (struct phl_snd_grp *) sounding group
 * @nsta: return value: number of STAs that successfully acquired BF resources
**/
enum rtw_phl_status
_phl_snd_proc_get_res_bf(
struct phl_info_t *phl_info, struct phl_snd_grp *grp, u8 *nsta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
struct phl_snd_sta *snd_sta;
u8 idx = 0;
struct rtw_phl_stainfo_t *sta = NULL;
bool mu, qry_bf;
*nsta = 0;
for (idx = 0; idx < grp->num_sta; idx++) {
snd_sta = &grp->sta[idx];
sta = rtw_phl_get_stainfo_by_macid(phl_info, snd_sta->macid);
if (NULL == sta) {
PHL_ERR("phl_snd_proc_get_bf_res: Cannot find STA macid 0x%x in PHL STA Info List \n",
snd_sta->macid);
continue;
}
mu = (snd_sta->snd_fb_t == PHL_SND_FB_TYPE_MU) ? true:false;
qry_bf = false;
if (sta->hal_sta->bf_entry != NULL) {
			/* The sta already has a BF resource */
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, " sta->bf_entry != NULL\n");
/* Check the BF resource */
hstatus = rtw_hal_snd_chk_bf_res(phl_info->hal,
sta, mu, sta->chandef.bw);
if (RTW_HAL_STATUS_FAILURE == hstatus) {
rtw_hal_snd_release_proc_sta_res(phl_info->hal,
sta);
qry_bf = true;
} else {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "Use Original BF Resource \n");
qry_bf = false;
}
} else {
qry_bf = true;
}
if (true == qry_bf) {
hstatus = rtw_hal_snd_query_proc_sta_res(
phl_info->hal, sta, mu, sta->chandef.bw,
grp->en_swap_mode);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("phl_snd_proc_get_bf_res: macid 0x%x query sounding resource FAIL \n",
snd_sta->macid);
if (grp->en_swap_mode) {
break;/* break in swap mode if one of sta query bf res fail */
}
continue;
}
}
if (grp->snd_type >= PHL_SND_TYPE_HE_HW) {
rtw_hal_snd_ndpa_sta_info_he(
sta,
&snd_sta->npda_sta_info,
snd_sta->bw,
snd_sta->snd_fb_t);
} else {
rtw_hal_snd_ndpa_sta_info_vht(sta,
&snd_sta->npda_sta_info, mu);
}
/* Link STA information to Group Information */
snd_sta->bf_entry = sta->hal_sta->bf_entry;
(*nsta)++;
}
if (*nsta == 0) {
grp->snd_sts = PHL_SND_STS_FAILURE;
pstatus = RTW_PHL_STATUS_FAILURE;
}
if (*nsta != grp->num_sta) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "FAIL : Sounding STAs is fewer than group sta because of resource!\n");
pstatus = RTW_PHL_STATUS_FAILURE;
}
return pstatus;
}
/* snd proc get BF/CSI resource */
enum rtw_phl_status
phl_snd_proc_get_res(
struct phl_info_t *phl_info, struct phl_snd_grp *grp, u8 *nsta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct phl_snd_sta *snd_sta;
FUNCIN_WSTS(pstatus);
snd_sta = &grp->sta[0];
	/* CQI Fb cannot be mixed with SU/MU feedback types */
if(snd_sta->snd_fb_t == PHL_SND_FB_TYPE_CQI)
pstatus = _phl_snd_proc_get_res_cqi_fb(phl_info, grp, nsta);
else
pstatus = _phl_snd_proc_get_res_bf(phl_info, grp, nsta);
if(pstatus != RTW_PHL_STATUS_SUCCESS)
grp->snd_sts = PHL_SND_STS_FAILURE;
FUNCOUT_WSTS(pstatus);
return pstatus;
}
/* 2. SND Preconfiguration */
/**
* _get_mu_mimo_gid_2sta()
 * hard-coded for 8852A, GID relationship
**/
static u8 _get_mu_mimo_gid_2sta(u8 primary, u8 secondary)
{
u8 gid_tbl[6][6] = { {0xFF, 1, 2, 3, 4, 5},
{16, 0xFF, 6, 7, 8, 9},
{17, 21, 0xFF, 10, 11, 12},
{18, 22, 25, 0xFF, 13, 14},
{19, 23, 26, 28, 0xFF, 15},
{20, 24, 27, 29, 30, 0xFF} };
u8 ret = 0xFF;
if ((primary < 6) && (secondary < 6))
ret = gid_tbl[primary][secondary];
return ret;
}
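/*
 * Example (values read directly from gid_tbl above): primary MU entry 0
 * paired with secondary MU entry 1 uses GID 1, while the reversed order
 * (primary 1, secondary 0) uses GID 16; the diagonal (primary == secondary)
 * and any index >= 6 return the invalid value 0xFF.
 */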
/* pre-calculate the MU GID group bitmap */
enum rtw_phl_status
phl_snd_cal_mu_grp_bitmap(struct phl_info_t *phl_info, struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct rtw_phl_stainfo_t *psta_info = NULL;
struct rtw_phl_stainfo_t *tmp_psta_info = NULL;
struct phl_snd_sta *sta = NULL;
struct phl_snd_sta *tmp_sta = NULL;
u8 bfmu_idx , bfmu_idx_tmp;
u8 i = 0, j = 0;
for (i = 0; i < MAX_NUM_STA_SND_GRP; i++) {
sta = &grp->sta[i];
if (false == sta->valid)
continue;
/* primary STA */
psta_info = rtw_phl_get_stainfo_by_macid(
phl_info, sta->macid);
if (NULL == sta->bf_entry)
continue;
if (false == rtw_hal_bf_chk_bf_type(phl_info->hal,
psta_info, true)) {
continue; /*BF SU Entry*/
}
/* primary STA MU Entry Idx */
bfmu_idx = rtw_hal_bf_get_sumu_idx(phl_info->hal,
sta->bf_entry);
psta_info->hal_sta->mugrp_bmp = 0; /* clear first */
for (j = 0; j < MAX_NUM_STA_SND_GRP; j++) {
if (j == i) /* self */
continue;
/* secondary sta */
tmp_sta = &grp->sta[j];
if (NULL == tmp_sta->bf_entry)
continue;
tmp_psta_info = rtw_phl_get_stainfo_by_macid(
phl_info, tmp_sta->macid);
if (false == rtw_hal_bf_chk_bf_type(phl_info->hal,
tmp_psta_info, true)) {
continue; /* BF SU Entry */
}
/* secondary sta MU Entry Idx */
bfmu_idx_tmp = rtw_hal_bf_get_sumu_idx(phl_info->hal,
tmp_sta->bf_entry);
/* Default set group bit enable = 1 */
/* grp bitmap doesn't include self */
/** BIT 0 1 2 3 4
* MU_0 : MU_1 MU_2 MU_3 MU_4 MU_5
* MU_1 : MU_0 MU_2 MU_3 MU_4 MU_5
* MU_2 : MU_0 MU_1 MU_3 MU_4 MU_5
* ...
* MU_5 : MU_0 MU_1 MU_2 MU_3 MU_4
**/
if (bfmu_idx_tmp > bfmu_idx) {
psta_info->hal_sta->mugrp_bmp |=
BIT(bfmu_idx_tmp - 1);
} else {
psta_info->hal_sta->mugrp_bmp |=
BIT(bfmu_idx_tmp);
}
}
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "sta(macid = 0x%x) mugrp_bmp = 0x%x \n",
psta_info->macid, psta_info->hal_sta->mugrp_bmp);
}
return pstatus;
}
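/*
 * Worked example (follows the bitmap rule documented above): if the primary
 * STA sits on MU entry 2 and a secondary STA on MU entry 4, then 4 > 2 and
 * BIT(4 - 1) = BIT(3) is set in the primary's mugrp_bmp; a secondary on MU
 * entry 0 would instead set BIT(0).
 */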
/* Preconfiguration before sounding */
enum rtw_phl_status
phl_snd_proc_precfg(struct phl_info_t *phl_info, struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
struct phl_snd_sta *sta = NULL;
u8 idx = 0;
struct rtw_phl_stainfo_t *psta_info = NULL;
FUNCIN_WSTS(pstatus);
do {
if (grp == NULL) {
pstatus = RTW_PHL_STATUS_FAILURE;
break;
}
if (PHL_SND_TYPE_INVALID == grp->snd_type) {
			/* both SW and HW modes need to call the halmac API to set the bf entry */
break;
}
for (idx = 0; idx < MAX_NUM_STA_SND_GRP; idx++) {
sta = &grp->sta[idx];
if (false == sta->valid)
continue;
psta_info = rtw_phl_get_stainfo_by_macid(
phl_info, sta->macid);
/*check bf entry available and snd_fb_type = SU/MU */
if ((NULL != psta_info->hal_sta->bf_entry) &&
(PHL_SND_FB_TYPE_CQI != sta->snd_fb_t)) {
hstatus = rtw_hal_snd_proc_pre_cfg_sta(
phl_info->hal, psta_info);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
pstatus = RTW_PHL_STATUS_FAILURE;
}
}
}
/* Prepare Group bitmap for Tx MU-MIMO */
if (PHL_SND_FB_TYPE_MU == grp->sta[0].snd_fb_t)
pstatus = phl_snd_cal_mu_grp_bitmap(phl_info, grp);
} while (0);
if(pstatus != RTW_PHL_STATUS_SUCCESS)
grp->snd_sts = PHL_SND_STS_FAILURE;
FUNCOUT_WSTS(pstatus);
return pstatus;
}
/* 3. Send Sounding Command to HAL/FW */
/* TODO: RU allocation is currently a hard-coded value */
/* HE TB Sounding : 2 sta in a grp */
void
_phl_snd_proc_fw_cmd_he_tb_2sta(struct phl_info_t *phl_info,
struct phl_snd_grp *grp,
u8 *cmd, u8 bfrp_num)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct rtw_phl_stainfo_t *sta_info = NULL;
u8 *f_ru_tbl = NULL;
if (grp->num_sta != 2)
return;
/* get first sta */
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[0].macid);
if (bfrp_num == 1) {
if (CHANNEL_WIDTH_20 == grp->sta[0].bw)
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_20[1][0];/* Fixed 20MHz RU Table of 2 STA */
else
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_80[1][0];/* Fixed 80MHz RU Table of 2 STA */
} else {
if (CHANNEL_WIDTH_20 == grp->sta[0].bw)
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_20[0][0];/* Fixed 20MHz RU Table of 1 STA */
else
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_80[0][0];/* Fixed 80MHz RU Table of 1 STA */
}
	/* fill command */
rtw_hal_snd_ax_fwcmd_tb_pri(phl_info->hal, cmd, grp->sta[0].bw,
sta_info, grp->num_sta, 0);
/* Always use BFRP#0 for primary user */
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[0].npda_sta_info,
sta_info,
f_ru_tbl[0],
0,
0,
0);
/*get second sta*/
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[1].macid);
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[1].npda_sta_info,
sta_info,
f_ru_tbl[1],
1,
(bfrp_num == 1) ? 0 : 1,
(bfrp_num == 1) ? 1 : 0);
}
/* HE TB Sounding : 3 sta in a grp */
void
_phl_snd_proc_fw_cmd_he_tb_3sta(struct phl_info_t *phl_info,
struct phl_snd_grp *grp,
u8 *cmd, u8 bfrp_num)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct rtw_phl_stainfo_t *sta_info = NULL;
u8 *f_ru_tbl = NULL;
if(grp->num_sta != 3)
return;
/* get first sta */
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[0].macid);
if (bfrp_num == 1) {
if (CHANNEL_WIDTH_20 == grp->sta[0].bw)
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_20[2][0];/* Fixed 20MHz RU Table of 3 STA */
else
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_80[2][0];/* Fixed 80MHz RU Table of 3 STA */
} else {
if (CHANNEL_WIDTH_20 == grp->sta[0].bw)
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_20[1][0];/* Fixed 20MHz RU Table of 2 STA */
else
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_80[1][0];/* Fixed 80MHz RU Table of 2 STA */
}
	/* fill command */
rtw_hal_snd_ax_fwcmd_tb_pri(phl_info->hal, cmd, grp->sta[0].bw,
sta_info, grp->num_sta, 0);
/* Always use BFRP#0 for primary user */
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[0].npda_sta_info,
sta_info,
f_ru_tbl[0],
0,
0,
0);
/*get second sta*/
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[1].macid);
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[1].npda_sta_info,
sta_info,
f_ru_tbl[1],
1,
0,
1);
/*get third sta*/
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[2].macid);
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[2].npda_sta_info,
sta_info,
f_ru_tbl[2],
2,
(bfrp_num == 1) ? 0 : 1,
(bfrp_num == 1) ? 2 : 0);
}
/* HE TB Sounding : 4 sta in a grp */
void
_phl_snd_proc_fw_cmd_he_tb_4sta(struct phl_info_t *phl_info,
struct phl_snd_grp *grp,
u8 *cmd, u8 bfrp_num)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct rtw_phl_stainfo_t *sta_info = NULL;
u8 *f_ru_tbl = NULL;
if(grp->num_sta != 4)
return;
/* get first sta */
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[0].macid);
if (bfrp_num == 1) {
if (CHANNEL_WIDTH_20 == grp->sta[0].bw)
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_20[3][0];/* Fixed 20MHz RU Table of 4 STA */
else
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_80[3][0];/* Fixed 80MHz RU Table of 4 STA */
} else {
if (CHANNEL_WIDTH_20 == grp->sta[0].bw)
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_20[1][0];/* Fixed 20MHz RU Table of 2 STA */
else
f_ru_tbl = &snd->snd_param.fix_param.f_ru_tbl_80[1][0];/* Fixed 80MHz RU Table of 2 STA */
}
	/* fill command */
rtw_hal_snd_ax_fwcmd_tb_pri(phl_info->hal, cmd, grp->sta[0].bw,
sta_info, grp->num_sta, 0);
/* Always use BFRP#0 for primary user */
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[0].npda_sta_info,
sta_info,
f_ru_tbl[0],
0,
0,
0);
/*get second sta*/
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[1].macid);
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[1].npda_sta_info,
sta_info,
f_ru_tbl[1],
1,
(bfrp_num == 1) ? 0 : 0,
(bfrp_num == 1) ? 1 : 1);
/*get third sta*/
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[2].macid);
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[2].npda_sta_info,
sta_info,
f_ru_tbl[2],
2,
(bfrp_num == 1) ? 0 : 1,
(bfrp_num == 1) ? 2 : 0);
/*get 4th sta*/
sta_info = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[3].macid);
rtw_hal_snd_ax_fwcmd_tb_add_sta(
phl_info->hal, cmd,
&grp->sta[3].npda_sta_info,
sta_info,
f_ru_tbl[3],
3,
(bfrp_num == 1) ? 0 : 1,
(bfrp_num == 1) ? 3 : 1);
}
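/*
 * Note on the trailing arguments passed to rtw_hal_snd_ax_fwcmd_tb_add_sta()
 * by the three helpers above (inferred from their call patterns): with
 * bfrp_num == 1 every user stays on BFRP#0 with user index 0..num_sta-1;
 * with two BFRPs the second half of the group appears to move to BFRP#1 and
 * its user index restarts from 0.
 */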
enum rtw_phl_status
phl_snd_proc_start_sounding_fw(struct phl_info_t *phl_info,
struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct phl_sound_param *snd_param = &snd->snd_param;
struct rtw_phl_stainfo_t *sta_info = NULL;
u8 *cmd = NULL;
u8 i = 0;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "==> phl_snd_proc_start_sounding_fw \n");
do {
if (NULL == grp)
break;
if(grp->sta[0].valid == 0)
break;
/*get first sta*/
sta_info = rtw_phl_get_stainfo_by_macid(
phl_info, grp->sta[0].macid);
switch (grp->snd_type) {
case PHL_SND_TYPE_VHT_SW:
{
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "--> PHL_SND_TYPE_VHT_SW\n");
cmd = rtw_hal_snd_prepare_snd_cmd(phl_info->hal);
if (cmd == NULL)
break;
if (grp->num_sta == 1) {
rtw_hal_snd_vht_fwcmd_su(
phl_info->hal, cmd,
grp->sta[0].bw,
sta_info,
&grp->sta[0].npda_sta_info);
} else {
rtw_hal_snd_vht_fwcmd_mu_pri(
phl_info->hal, cmd,
grp->sta[0].bw,
sta_info,
grp->num_sta,
&grp->sta[0].npda_sta_info);
for (i = 1; i < grp->num_sta; i++) {
if(grp->sta[i].valid == 0)
break;
sta_info = rtw_phl_get_stainfo_by_macid(
phl_info, grp->sta[i].macid);
rtw_hal_snd_vht_fwcmd_mu_add_sta(
phl_info->hal, cmd,
&grp->sta[i].npda_sta_info,
sta_info,
i,
(i==(grp->num_sta-1)) ? 1 : 0
);
}
}
rtw_hal_snd_set_fw_cmd_dialogtkn(
phl_info->hal, cmd,
0,
snd_param->snd_dialog_token);
hstatus = rtw_hal_snd_send_fw_cmd(phl_info->hal, cmd);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"ERROR: rtw_hal_snd_send_fw_cmd Fail!!!!\n");
}
/* free cmd buf at last !!! */
hstatus = rtw_hal_snd_release_snd_cmd(phl_info->hal, cmd);
}
break;
case PHL_SND_TYPE_HE_SW:
{
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "--> PHL_SND_TYPE_HE_SW\n");
cmd = rtw_hal_snd_prepare_snd_cmd(phl_info->hal);
if (cmd == NULL)
break;
if (grp->num_sta == 1) {
rtw_hal_snd_ax_fwcmd_nontb(
phl_info->hal, cmd,
grp->sta[0].bw,
sta_info,
&grp->sta[0].npda_sta_info);
} else {
/* Default use only 1 BFRP */
/* TODO: Fixed mode or when to use 2 BFRP */
if (grp->num_sta == 4)
_phl_snd_proc_fw_cmd_he_tb_4sta(
phl_info, grp, cmd, 1);
else if (grp->num_sta == 3)
_phl_snd_proc_fw_cmd_he_tb_3sta(
phl_info, grp, cmd, 1);
else if (grp->num_sta == 2)
_phl_snd_proc_fw_cmd_he_tb_2sta(
phl_info, grp, cmd, 1);
else
				PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "phl sounding : the 1-STA HE-TB case is NOT ready; need to add a fake STA into the NDPA\n");
}
rtw_hal_snd_set_fw_cmd_dialogtkn(
phl_info->hal, cmd,
1,
snd_param->snd_dialog_token);
hstatus = rtw_hal_snd_send_fw_cmd(phl_info->hal, cmd);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"ERROR: rtw_hal_snd_send_fw_cmd Fail!!!!\n");
}
/* free cmd buf at last !!! */
hstatus = rtw_hal_snd_release_snd_cmd(phl_info->hal, cmd);
}
break;
case PHL_SND_TYPE_VHT_HW:
{
u8 dialog_tkn = (snd->snd_param.snd_dialog_token << 2);
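		/* Token packing follows struct npda_dialog_token in
		 * phl_sound.h (assuming the usual LSB-first bitfield layout):
		 * bit0 = reserved, bit1 = he, bits 2-7 = token. The VHT case
		 * leaves the HE bit clear; the HE_HW case below additionally
		 * ORs in BIT(1). */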
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"PHL_SND_TYPE_VHT_HW:\n");
if(NULL == snd->ops.snd_send_ndpa)
break;
rtw_hal_snd_mac_ctrl(phl_info->hal, sta_info->wrole->hw_band, 0);
pstatus = snd->ops.snd_send_ndpa(
phl_to_drvpriv(phl_info),
sta_info->wrole,
&dialog_tkn,
&grp->sta[0].npda_sta_info,
grp->sta[0].bw);
}
break;
case PHL_SND_TYPE_HE_HW:
{
u8 dialog_tkn = (snd->snd_param.snd_dialog_token << 2) | BIT(1);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"PHL_SND_TYPE_HE_HW:\n");
if(NULL == snd->ops.snd_send_ndpa)
break;
rtw_hal_snd_mac_ctrl(phl_info->hal, sta_info->wrole->hw_band, 0);
pstatus = snd->ops.snd_send_ndpa(
phl_to_drvpriv(phl_info),
sta_info->wrole,
&dialog_tkn,
&grp->sta[0].npda_sta_info,
grp->sta[0].bw);
}
break;
case PHL_SND_TYPE_INVALID:
default:
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "ERROR: grp->snd_type invalid\n");
break;
}
pstatus = RTW_PHL_STATUS_SUCCESS;
} while (0);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "<== phl_snd_proc_start_sounding_fw \n");
return pstatus;
}
/* 4. Post Configuration */
/* BY MU_GID if MU Sounding */
enum rtw_phl_status
_phl_snd_proc_postcfg_mu_gid(struct phl_info_t *phl_info,
struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
struct rtw_phl_stainfo_t *psta_info = NULL;
struct phl_snd_sta *sta = NULL;
struct rtw_hal_muba_info ba_info;
u8 i = 0, j = 0;
u8 bfmu_idx;
u8 mugrp_bmp = 0;
u8 gid = 0xFF;
for (i = 0; i < MAX_NUM_STA_SND_GRP; i++) {
sta = &grp->sta[i];
if((false == sta->valid) || (NULL == sta->bf_entry))
continue;
bfmu_idx = rtw_hal_bf_get_sumu_idx(phl_info->hal,
sta->bf_entry);
psta_info = rtw_phl_get_stainfo_by_macid(
phl_info, sta->macid);
mugrp_bmp = psta_info->hal_sta->mugrp_bmp;
		/* GID(X + Y)'s setting is the same as GID(Y + X) */
for (j = bfmu_idx; j < 5; j++) {
if (mugrp_bmp & BIT(j)) {
gid = _get_mu_mimo_gid_2sta(bfmu_idx, j + 1);
/*Prepare MU BAR Info*/
rtw_hal_bf_preset_mu_ba_info(phl_info->hal,
psta_info, &ba_info);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "snd_post_cfg : gid = 0x%x \n", gid);
hstatus = rtw_hal_snd_proc_post_cfg_gid(
phl_info->hal,
gid,
(void *)&ba_info);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
pstatus = RTW_PHL_STATUS_FAILURE;
}
}
}
}
return pstatus;
}
/* Per STA setting */
enum rtw_phl_status
_phl_snd_proc_postcfg_sta(struct phl_info_t *phl_info,
struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct rtw_phl_stainfo_t *psta_info = NULL;
struct phl_snd_sta *sta = NULL;
u8 idx = 0;
bool mu = false;
/*post config for a single sta*/
for (idx = 0; idx < MAX_NUM_STA_SND_GRP; idx++) {
sta = &grp->sta[idx];
mu = (sta->snd_fb_t == PHL_SND_FB_TYPE_MU) ? true : false;
if (false == sta->valid)
continue;
psta_info = rtw_phl_get_stainfo_by_macid(phl_info, sta->macid);
if (NULL == psta_info)
continue;
rtw_hal_snd_polling_snd_sts(phl_info->hal, psta_info);
if (RTW_HAL_STATUS_SUCCESS ==
rtw_hal_bf_get_entry_snd_sts(psta_info->hal_sta->bf_entry)) {
sta->snd_sts = PHL_SND_STS_SUCCESS;
} else {
sta->snd_sts = PHL_SND_STS_FAILURE;
}
if ((PHL_SND_STS_SUCCESS != sta->snd_sts) &&
(false == snd->snd_param.bypass_snd_sts_chk)) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "SKIP STA Post Config because of sounding fail\n");
continue; /*Sounding Fail, Next STA */
}
hstatus = rtw_hal_snd_proc_post_cfg_sta(phl_info->hal,
psta_info, mu);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
pstatus = RTW_PHL_STATUS_FAILURE;
}
}
return pstatus;
}
/* SND PROC Post Config API for FSM */
enum rtw_phl_status
phl_snd_proc_postcfg(struct phl_info_t *phl_info, struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
bool mu = false, he = true;
FUNCIN();
do {
if (grp == NULL) {
pstatus = RTW_PHL_STATUS_FAILURE;
break;
}
he = (grp->snd_type >= PHL_SND_TYPE_HE_HW) ? true : false;
mu = (grp->sta[0].snd_fb_t == PHL_SND_FB_TYPE_MU) ? true :
false;
/* 1. post config for whole sounding group */
if (grp->skip_post_cfg & BIT(1)) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "SKIP SND PROC POST CFG - Group \n");
} else {
hstatus = rtw_hal_snd_proc_post_cfg(
phl_info->hal,
he,
mu,
grp->en_fix_mode);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
pstatus = RTW_PHL_STATUS_FAILURE;
}
}
/* 2. post config for gid (STA + STA) */
if (grp->skip_post_cfg & BIT(2)) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "SKIP SND PROC POST CFG - GID \n");
} else {
if (true == mu) {
/* only mu sounding has gid related config */
_phl_snd_proc_postcfg_mu_gid(phl_info, grp);
}
}
/* 3. (Shall always at last) post config for each STA in group */
if (grp->skip_post_cfg & BIT(3)) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "SKIP SND PROC POST CFG - STA \n");
} else {
_phl_snd_proc_postcfg_sta(phl_info, grp);
}
} while (0);
FUNCOUT();
return pstatus;
}
/* SND_PROC_DOWN --> Next Sounding : Check sounding module status */
enum rtw_phl_status
phl_snd_proc_chk_condition(struct phl_info_t *phl_info, struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
struct rtw_wifi_role_t *role =
(struct rtw_wifi_role_t *)snd->snd_param.m_wrole;
struct phl_snd_sta *sta = NULL;
struct rtw_phl_stainfo_t *psta = NULL;
struct phl_sound_param *para = &snd->snd_param;
u8 i = 0;
u8 terminate = 0;
/* TODO: Add any conditions to stop the sounding fsm here */
do {
if (true == snd->is_terminated)
break;
if (NULL != role) {
if (PHL_RTYPE_STATION == role->type) {
if (MLME_NO_LINK == role->mstate)
break;
psta = rtw_phl_get_stainfo_self(phl_info, role);
if (rtw_hal_bf_get_entry_snd_sts(
psta->hal_sta->bf_entry)) {
para->snd_fail_counter++;
if (para->snd_fail_counter > 10) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_ ,
"Sounding Fail Count > 10, break sounding !!!!\n");
break;
}
} else {
para->snd_fail_counter = 0;
}
} else if (PHL_RTYPE_AP == role->type) {
if (false == role->active)
break;
if (grp->sta[0].bw > role->chandef.bw)
break;
if (0 == grp->num_sta)
break;
for (i = 0; i < grp->num_sta; i++) {
sta = &grp->sta[i];
psta = rtw_phl_get_stainfo_by_macid(phl_info, sta->macid);
if (NULL == psta) {
terminate = 1;
break;
}
if (false == psta->active) {
terminate = 1;
break;
}
if (sta->bw != psta->chandef.bw) {
terminate = 1;
break;
}
}
if(terminate)
break;
}
}
pstatus = RTW_PHL_STATUS_SUCCESS;
} while (0);
return pstatus;
}
/**
* Check the previous sounding group sounding status and free the resource.
* if grp is TIER0 grp, skip release BF/CQI resource.
**/
void
phl_snd_proc_chk_prev_grp(struct phl_info_t *phl_info,
struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
bool free_res = false;
if (PHL_SND_STS_FAILURE == grp->snd_sts) {
/* Sounding Fail */
free_res = true;
} else if ((PHL_SND_GRP_TIER_1 == grp->grp_tier) && (PHL_SND_STS_PENDING != grp->snd_sts)) {
/* Sounding Success and Group is TIER_1 */
free_res = true;
}
if (free_res) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "Free Previous SND Group's Resource\n");
pstatus = phl_snd_proc_release_res(phl_info, grp);
}
return;
}
enum rtw_phl_status
phl_snd_polling_pri_sta_sts(struct phl_info_t *phl_info,
struct phl_snd_grp *grp)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct rtw_phl_stainfo_t *sta = NULL;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"phl_snd_polling_stutus : polling primay sta sounding status\n");
sta = rtw_phl_get_stainfo_by_macid(phl_info, grp->sta[0].macid);
if (sta != NULL) {
if (sta->active == true)
rtw_hal_snd_polling_snd_sts(phl_info->hal, sta);
else
pstatus = RTW_PHL_STATUS_FAILURE;
} else {
pstatus = RTW_PHL_STATUS_FAILURE;
}
return pstatus;
}
void
phl_snd_mac_ctrl(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole, u8 ctrl)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
hstatus = rtw_hal_snd_mac_ctrl(phl_info->hal, wrole->hw_band, ctrl);
}
enum rtw_phl_status
rtw_phl_snd_init_ops_send_ndpa(void *phl,
enum rtw_phl_status (*snd_send_ndpa)(void *,
struct rtw_wifi_role_t *,
u8 *,
u32 *,
enum channel_width))
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_sound_obj *snd = NULL;
if((phl_info != NULL) && (snd_send_ndpa != NULL)) {
if (phl_info->snd_obj != NULL) {
snd = (struct phl_sound_obj *)phl_info->snd_obj;
snd->ops.snd_send_ndpa = snd_send_ndpa;
pstatus = RTW_PHL_STATUS_SUCCESS;
}
}
return pstatus;
}
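/*
 * Usage sketch (core layer, illustration only; core_snd_send_ndpa is a
 * hypothetical callback that must match the prototype above):
 *   rtw_phl_snd_init_ops_send_ndpa(phl, core_snd_send_ndpa);
 * The registered callback is later invoked by the HW-mode sounding paths
 * (PHL_SND_TYPE_VHT_HW / PHL_SND_TYPE_HE_HW) with the dialog token, the NDPA
 * STA info word and the sounding bandwidth.
 */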
/* end of phl/phl_sound.c -- phl/phl_sound.h follows */
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_SOUND_H__
#define __PHL_SOUND_H__
#define MAX_SND_GRP_NUM 4
#define MAX_SND_HE_BFRP_NUM 2
#define MAX_SND_HE_BFRP_USER_NUM 4
#define MAX_SND_VHT_BFRP_NUM 3
#define SND_PROC_DEFAULT_PERIOD 200 /* ms */
#define SND_PROC_DEFAULT_TIMEOUT 10 /* ms */
/* MAX_NUM_STA_SND_GRP : phl sounding design limit = 4; hw/fw supports a maximum of 8 STAs in one sounding frame exchange */
#define MAX_NUM_STA_SND_GRP 4
enum snd_type {
PHL_SND_TYPE_INVALID,
PHL_SND_TYPE_VHT_HW,
PHL_SND_TYPE_VHT_SW,
PHL_SND_TYPE_HE_HW,
PHL_SND_TYPE_HE_SW
};
enum snd_fb_type {
PHL_SND_FB_TYPE_SU,
PHL_SND_FB_TYPE_MU,
PHL_SND_FB_TYPE_CQI
};
enum snd_status {
PHL_SND_STS_PENDING,
PHL_SND_STS_ON_PROC,
PHL_SND_STS_SUCCESS,
PHL_SND_STS_FAILURE
};
enum snd_grp_tier {
PHL_SND_GRP_TIER_0, /* TIER_0 Group will free sounding resource in state SND_PROC_TERMINATE or group removed*/
PHL_SND_GRP_TIER_1 /* TIER_1 Group will free sounding resource in next SND_PROC_IDLE*/
};
enum snd_test_flag {
PHL_SND_TEST_F_NONE = 0, /* default value */
PHL_SND_TEST_F_ONE_TIME = BIT(0), /* Test mode : only sounding one time */
PHL_SND_TEST_F_FIX_STA = BIT(1), /* forced SND STAs (skip grouping and check TP) */
PHL_SND_TEST_F_GRP_SND_PARA = BIT(2), /* Fixed SND STA's Feedback Type or BW from snd->fix_param */
PHL_SND_TEST_F_GRP_EN_BF_FIX = BIT(3), /* Enable grp->en_fix_mode in grouping function */
PHL_SND_TEST_F_PASS_STS_CHK = BIT(4) /* by pass sounding status check when post config */
};
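/* The test flags are bit values and may be OR-ed together, e.g. (illustration
 * only) test_flag = PHL_SND_TEST_F_ONE_TIME | PHL_SND_TEST_F_FIX_STA sounds
 * the forced STA set once and then terminates the sounding FSM. */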
struct npda_dialog_token {
u8 reserved:1;
u8 he:1;
u8 token:6;
};
struct vht_ndpa_sta_info {
u16 aid12:12;
#define VHT_NDPA_FB_TYPE_SU 0
#define VHT_NDPA_FB_TYPE_MU 1
u16 feedback_type:1;
u16 nc:3;
};
struct he_ndpa_sta_info {
u32 aid:11;
u32 bw:14;
u32 fb_ng:2;
u32 disambiguation:1;
u32 cb:1;
u32 nc:3;
};
struct phl_snd_sta {
u8 valid;
u16 macid;
enum channel_width bw; /* Sounding BW */
enum snd_fb_type snd_fb_t; /* Sounding feedback type : SU/MU/CQI */
u32 npda_sta_info; /* VHT/HE NDPA STA info*/
enum snd_status snd_sts;
/* Query resource in SND PROC */
void *bf_entry;/* HAL BF Entry for sounding */
};
struct phl_snd_grp {
u8 gidx;
enum snd_type snd_type;
enum snd_grp_tier grp_tier;
u8 wrole_idx;
u8 band;
u8 num_sta;
struct phl_snd_sta sta[MAX_NUM_STA_SND_GRP];
enum snd_status snd_sts;
	u8 en_fix_mode; /* post config forced mode setting */
/**
	 * en_swap_mode : Supported by TIER_0 MU groups only. When swap mode is
	 * enabled, each MU entry uses two CSI MU buffers, so disabling MU in the
	 * pre-config step can be skipped for the next sounding round.
**/
u8 en_swap_mode;
u8 skip_post_cfg;/* 1: SKIP ALL; BIT1:Skip Group, BIT2:Skip GID, BIT3:Skip STA */
};
/* for whole phl snd fsm module fixed mode, only worked if snd_param->test_flag != 0 */
struct phl_snd_fix_param {
u8 en_fix_gidx;
u8 en_fix_fb_type;
u8 en_fix_sta;
u8 en_fix_snd_bw;
u8 grp_idx;
enum snd_fb_type snd_fb_type;
u16 sta_macid[MAX_NUM_STA_SND_GRP];
enum channel_width bw[MAX_NUM_STA_SND_GRP];
u8 f_ru_tbl_20[MAX_SND_HE_BFRP_USER_NUM][MAX_SND_HE_BFRP_USER_NUM];
u8 f_ru_tbl_80[MAX_SND_HE_BFRP_USER_NUM][MAX_SND_HE_BFRP_USER_NUM];
};
struct phl_sound_param {
void *m_wrole;
struct phl_snd_grp snd_grp[MAX_SND_GRP_NUM];
u32 grp_used_map;
u8 cur_proc_grp_idx;
u8 pre_proc_grp_idx;
u8 snd_func_grp_num;
u8 snd_dialog_token;
u8 snd_proc_timeout_ms;
u32 proc_start_time;
u8 snd_proc_period;
bool bypass_snd_sts_chk;
u32 test_flag;
struct phl_snd_fix_param fix_param;
u8 snd_fail_counter;
};
struct phl_snd_ops
{
enum rtw_phl_status (*snd_send_ndpa)(void *drv_priv,
struct rtw_wifi_role_t *wrole,
u8 *snd_dialog_tkn,
u32 *ndpa_sta,
enum channel_width snd_bw);
};
struct phl_sound_obj {
#ifdef CONFIG_FSM
struct fsm_main *fsm;
struct fsm_obj *fsm_obj;
#endif
struct phl_sound_param snd_param;
struct phl_snd_ops ops;
struct phl_info_t *phl_info;
void *iface;
u8 snd_in_progress;
u8 is_terminated;
_os_lock snd_lock;
/* snd test */
u8 wrole_idx;
/* snd cmd disp related */
u8 msg_busy;
_os_lock cmd_lock;
};
#ifdef CONFIG_FSM
enum rtw_phl_status phl_snd_new_obj(
struct fsm_main *fsm,
struct phl_info_t *phl_info);
#endif
/* phl sounding intern api*/
enum rtw_phl_status phl_snd_func_snd_init(struct phl_info_t *phl_info);
enum rtw_phl_status phl_snd_func_pre_config(struct phl_info_t *phl_info);
/* phl sounding extern api*/
enum rtw_phl_status
rtw_phl_sound_start(void *phl, u8 wrole_idx, u8 st_dlg_tkn, u8 period, u8 test_flag);
enum rtw_phl_status
rtw_phl_sound_abort(void *phl);
enum rtw_phl_status
rtw_phl_sound_down_ev(void *phl);
void rtw_phl_snd_fix_tx_he_mu(struct phl_info_t *phl_info, u8 gid, bool en);
/* snd func grp */
struct phl_snd_grp *
phl_snd_get_grp_byidx(struct phl_info_t *phl_info, u8 gidx);
void
phl_snd_func_remove_grp_all(struct phl_info_t *phl_info);
enum rtw_phl_status
phl_snd_func_grouping(struct phl_info_t *phl_info, u8 wroleidx);
/* snd proc resource */
enum rtw_phl_status
phl_snd_proc_get_res(
struct phl_info_t *phl_info, struct phl_snd_grp *grp, u8 *nsta);
enum rtw_phl_status
phl_snd_proc_release_res(struct phl_info_t *phl_info, struct phl_snd_grp *grp);
/* snd proc precfg */
enum rtw_phl_status
phl_snd_proc_precfg(struct phl_info_t *phl_info, struct phl_snd_grp *grp);
/* snd proc busy cmd to fw */
enum rtw_phl_status
phl_snd_proc_start_sounding_fw(struct phl_info_t *phl_info,
struct phl_snd_grp *grp);
/* snd proc postcfg */
enum rtw_phl_status
phl_snd_proc_postcfg(struct phl_info_t *phl_info, struct phl_snd_grp *grp);
enum rtw_phl_status
phl_snd_proc_chk_condition(struct phl_info_t *phl_info, struct phl_snd_grp *grp);
void
phl_snd_proc_chk_prev_grp(struct phl_info_t *phl_info,
struct phl_snd_grp *grp);
enum rtw_phl_status
phl_snd_polling_pri_sta_sts(struct phl_info_t *phl_info,
struct phl_snd_grp *grp);
void
phl_snd_mac_ctrl(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole, u8 ctrl);
#endif
/* end of phl/phl_sound.h -- phl/phl_sound_cmd.c follows */
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
#ifdef CONFIG_CMD_DISP
/* START of sounding / beamform cmd_disp module */
void
_phl_snd_cmd_set_eng_busy(struct phl_info_t *phl_info, enum snd_cmd_disp_ctrl ctrl)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
if (snd != NULL) {
snd->msg_busy |= BIT(ctrl);
}
}
void
_phl_snd_cmd_set_eng_idle(struct phl_info_t *phl_info, enum snd_cmd_disp_ctrl ctrl)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
if (snd != NULL) {
snd->msg_busy &= ~(BIT(ctrl));
}
}
enum rtw_phl_status
_phl_snd_cmd_get_eng_status(struct phl_info_t *phl_info, enum snd_cmd_disp_ctrl ctrl)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
if (snd != NULL) {
if (0 != (snd->msg_busy & BIT(ctrl))) {
pstatus = RTW_PHL_STATUS_PENDING;
} else {
pstatus = RTW_PHL_STATUS_SUCCESS;
}
} else {
pstatus = RTW_PHL_STATUS_RESOURCE;
}
return pstatus;
}
enum rtw_phl_status
_phl_snd_aquire_eng(struct phl_info_t *phl_info, enum snd_cmd_disp_ctrl ctrl)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
void *d = phl_to_drvpriv(phl_info);
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
if (snd == NULL)
return RTW_PHL_STATUS_FAILURE;
	/* acquire cmd_disp_eng control */
_os_spinlock(d, &snd->cmd_lock, _bh, NULL);
pstatus = _phl_snd_cmd_get_eng_status(phl_info, ctrl);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
_os_spinunlock(d, &snd->cmd_lock, _bh, NULL);
return pstatus;
}
	_phl_snd_cmd_set_eng_busy(phl_info, ctrl);
_os_spinunlock(d, &snd->cmd_lock, _bh, NULL);
return pstatus;
}
void
_phl_snd_free_eng(struct phl_info_t *phl_info, enum snd_cmd_disp_ctrl ctrl)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
void *d = phl_to_drvpriv(phl_info);
if (snd != NULL) {
_os_spinlock(d, &snd->cmd_lock, _bh, NULL);
_phl_snd_cmd_set_eng_idle(phl_info, ctrl);
_os_spinunlock(d, &snd->cmd_lock, _bh, NULL);
}
}
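/*
 * Acquire/release pattern (sketch of how the two helpers above bracket a
 * dispatched command, mirroring rtw_phl_snd_cmd_set_vht_gid() below):
 *   if (RTW_PHL_STATUS_SUCCESS !=
 *       _phl_snd_aquire_eng(phl_info, SND_CMD_DISP_CTRL_BFEE))
 *           return RTW_PHL_STATUS_FAILURE;
 *   ...build and send the phl_msg...
 *   on failure, or in the message completion callback:
 *   _phl_snd_free_eng(phl_info, SND_CMD_DISP_CTRL_BFEE);
 */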
static enum phl_mdl_ret_code
_phl_snd_cmd_module_init(void *phl, void *dispr, void **priv)
{
enum phl_mdl_ret_code ret = MDL_RET_SUCCESS;
#ifdef CONFIG_SND_CMD
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *d = phl_to_drvpriv(phl_info);
struct phl_sound_obj *snd_obj = NULL;
PHL_INFO("--> %s\n", __func__);
if (NULL == phl_info->snd_obj) {
phl_info->snd_obj = _os_kmem_alloc(d, sizeof(struct phl_sound_obj));
}
snd_obj = phl_info->snd_obj;
if (snd_obj == NULL) {
ret = MDL_RET_FAIL;
} else {
/* Init the snd static resources here */
snd_obj->phl_info = (struct phl_info_t *)phl;
_os_spinlock_init(d, &snd_obj->snd_lock);
_os_spinlock_init(d, &snd_obj->cmd_lock);
}
#endif
*priv = (void *)phl;
return ret;
}
static void _phl_snd_cmd_module_deinit(void *dispr, void *priv)
{
#ifdef CONFIG_SND_CMD
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
struct phl_sound_obj *snd_obj = (struct phl_sound_obj *)phl_info->snd_obj;
void *d = phl_to_drvpriv(phl_info);
PHL_INFO("--> %s\n", __func__);
if (NULL != snd_obj) {
_os_spinlock_free(d, &snd_obj->snd_lock);
_os_spinlock_free(d, &snd_obj->cmd_lock);
_os_kmem_free(d, snd_obj, sizeof(struct phl_sound_obj));
}
phl_info->snd_obj = NULL;
#endif
return;
}
static enum phl_mdl_ret_code _phl_snd_cmd_module_start(void *dispr, void *priv)
{
return MDL_RET_SUCCESS;
}
static enum phl_mdl_ret_code _phl_snd_cmd_module_stop(void *dispr, void *priv)
{
return MDL_RET_SUCCESS;
}
static enum phl_mdl_ret_code
_phl_snd_cmd_module_set_info(void *dispr, void *priv,
struct phl_module_op_info *info)
{
enum phl_mdl_ret_code mstatus = MDL_RET_IGNORE;
struct phl_info_t *phl = (struct phl_info_t *)priv;
PHL_INFO("[SND_CMD], %s(): opcode %d.\n", __func__, info->op_code);
switch (info->op_code) {
case SND_CMD_OP_SET_AID:
{
struct snd_cmd_set_aid *cmdbuf = (struct snd_cmd_set_aid *)info->inbuf;
if (NULL == cmdbuf) {
mstatus = MDL_RET_FAIL;
break;
}
if (NULL == cmdbuf->sta_info) {
mstatus = MDL_RET_FAIL;
cmdbuf->cmd_sts = MDL_RET_FAIL;
break;
}
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_beamform_set_aid(
phl->hal, cmdbuf->sta_info, cmdbuf->aid)) {
mstatus = MDL_RET_FAIL;
cmdbuf->cmd_sts = MDL_RET_FAIL;
break;
}
#ifdef RTW_WKARD_BFEE_SET_AID
cmdbuf->sta_info->wrole->last_set_aid = cmdbuf->aid;
#endif
cmdbuf->cmd_sts = MDL_RET_SUCCESS;
mstatus = MDL_RET_SUCCESS;
}
break;
case SND_CMD_OP_NONE:
case SND_CMD_OP_MAX:
default:
break;
}
return mstatus;
}
static enum phl_mdl_ret_code
_phl_snd_cmd_module_query_info(void *dispr, void *priv,
struct phl_module_op_info *info)
{
return MDL_RET_IGNORE;
}
static enum phl_mdl_ret_code
_phl_snd_cmd_module_msg_msg_hdlr_pre(struct phl_info_t *phl,
struct phl_msg *msg)
{
enum phl_mdl_ret_code mstatus = MDL_RET_IGNORE;
switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
case MSG_EVT_SET_VHT_GID:
/* do nothing in pre phase */
break;
case MSG_EVT_SET_BFEE_AID:
/* do nothing in pre phase */
break;
default:
break;
}
return mstatus;
}
static enum phl_mdl_ret_code
_phl_snd_cmd_module_msg_msg_hdlr_post(struct phl_info_t *phl,
struct phl_msg *msg)
{
enum phl_mdl_ret_code mstatus = MDL_RET_IGNORE;
switch (MSG_EVT_ID_FIELD(msg->msg_id)) {
case MSG_EVT_SET_VHT_GID:
{
struct rtw_phl_gid_pos_tbl *gid_tbl = (struct rtw_phl_gid_pos_tbl *)msg->inbuf;
if (msg->inlen != sizeof(struct rtw_phl_gid_pos_tbl)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
"%s() VHT-BFEE : Error, size mis-match \n", __func__);
mstatus = MDL_RET_FAIL;
break;
}
rtw_hal_beamform_set_vht_gid(phl->hal, msg->band_idx, gid_tbl);
mstatus = MDL_RET_SUCCESS;
}
break;
case MSG_EVT_SET_BFEE_AID:
{
struct snd_cmd_set_aid *cmdbuf = (struct snd_cmd_set_aid *)msg->inbuf;
if (NULL == cmdbuf) {
mstatus = MDL_RET_FAIL;
break;
}
if (NULL == cmdbuf->sta_info) {
mstatus = MDL_RET_FAIL;
cmdbuf->cmd_sts = MDL_RET_FAIL;
break;
}
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_beamform_set_aid(
phl->hal, cmdbuf->sta_info, cmdbuf->aid)) {
mstatus = MDL_RET_FAIL;
cmdbuf->cmd_sts = MDL_RET_FAIL;
break;
}
cmdbuf->cmd_sts = MDL_RET_SUCCESS;
mstatus = MDL_RET_SUCCESS;
}
break;
default:
break;
}
return mstatus;
}
static enum phl_mdl_ret_code
_phl_snd_cmd_module_msg_hdlr(void *dispr, void *priv,
struct phl_msg *msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
enum phl_mdl_ret_code mstatus = MDL_RET_IGNORE;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"===> %s() event id : 0x%x \n", __func__, MSG_EVT_ID_FIELD(msg->msg_id));
if (IS_MSG_FAIL(msg->msg_id)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
"%s: cmd dispatcher notify cmd failure: 0x%x.\n",
__FUNCTION__, msg->msg_id);
mstatus = MDL_RET_FAIL;
return mstatus;
}
if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
mstatus = _phl_snd_cmd_module_msg_msg_hdlr_pre(phl_info, msg);
} else {
mstatus = _phl_snd_cmd_module_msg_msg_hdlr_post(phl_info, msg);
}
return mstatus;
}
enum rtw_phl_status phl_snd_cmd_register_module(struct phl_info_t *phl_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct phl_bk_module_ops bk_ops;
struct phl_cmd_dispatch_engine *disp_eng = &(phl_info->disp_eng);
u8 i = 0;
bk_ops.init = _phl_snd_cmd_module_init;
bk_ops.deinit = _phl_snd_cmd_module_deinit;
bk_ops.start = _phl_snd_cmd_module_start;
bk_ops.stop = _phl_snd_cmd_module_stop;
bk_ops.msg_hdlr = _phl_snd_cmd_module_msg_hdlr;
bk_ops.set_info = _phl_snd_cmd_module_set_info;
bk_ops.query_info = _phl_snd_cmd_module_query_info;
for (i = 0; i < disp_eng->phy_num; i++) {
phl_status = phl_disp_eng_register_module(phl_info, i,
PHL_MDL_SOUND, &bk_ops);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s register SOUND module in cmd disp band[%d] failed\n",
__func__, i);
phl_status = RTW_PHL_STATUS_FAILURE;
break;
}
}
return phl_status;
}
/* Start of APIs for Core/OtherModule */
/* sub-functions */
/**
* _phl_snd_cmd_post_set_vht_gid(...)
* used by rtw_phl_snd_cmd_set_vht_gid(..)
**/
void _phl_snd_cmd_post_set_vht_gid(void* priv, struct phl_msg* msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
	PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "--> %s : release memory \n", __func__);
if (msg->inbuf && msg->inlen){
_os_kmem_free(phl_to_drvpriv(phl_info), msg->inbuf, msg->inlen);
}
/* release cmd_disp_eng control */
_phl_snd_free_eng(phl_info, SND_CMD_DISP_CTRL_BFEE);
}
/* main-functions */
/**
* rtw_phl_snd_cmd_set_vht_gid (...)
* input : struct rtw_phl_gid_pos_tbl *tbl
 * the received VHT GID management frame's GID / position information.
**/
enum rtw_phl_status
rtw_phl_snd_cmd_set_vht_gid(void *phl,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_gid_pos_tbl *tbl)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
void *d = phl_to_drvpriv(phl_info);
struct rtw_phl_gid_pos_tbl *gid_tbl;
	/* acquire cmd_disp_eng control */
if (RTW_PHL_STATUS_SUCCESS !=
_phl_snd_aquire_eng(phl_info, SND_CMD_DISP_CTRL_BFEE)) {
return RTW_PHL_STATUS_FAILURE;
}
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_SOUND);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_SET_VHT_GID);
msg.band_idx = wrole->hw_band;
attr.completion.completion = _phl_snd_cmd_post_set_vht_gid;
attr.completion.priv = phl_info;
gid_tbl = (struct rtw_phl_gid_pos_tbl *)_os_kmem_alloc(d, sizeof(struct rtw_phl_gid_pos_tbl));
_os_mem_cpy(d, gid_tbl, tbl, sizeof(struct rtw_phl_gid_pos_tbl));
msg.inbuf = (u8 *)gid_tbl;
msg.inlen = sizeof(struct rtw_phl_gid_pos_tbl);
phl_status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "%s: Dispr send msg fail!\n",
__func__);
goto exit;
}
return phl_status;
exit:
_os_kmem_free(d, gid_tbl,
sizeof(struct rtw_phl_gid_pos_tbl));
/* release cmd_disp_eng control */
_phl_snd_free_eng(phl_info, SND_CMD_DISP_CTRL_BFEE);
return phl_status;
}
void _phl_snd_cmd_post_set_aid(void* priv, struct phl_msg* msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
#ifdef RTW_WKARD_BFEE_SET_AID
struct snd_cmd_set_aid *cmdbuf = NULL;
#endif
if (msg->inbuf && msg->inlen){
#ifdef RTW_WKARD_BFEE_SET_AID
cmdbuf = (struct snd_cmd_set_aid *)msg->inbuf;
/* backup aid */
if (MDL_RET_SUCCESS == cmdbuf->cmd_sts) {
cmdbuf->sta_info->wrole->last_set_aid = cmdbuf->aid;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
"%s : set aid (%d)\n",
__func__, cmdbuf->sta_info->wrole->last_set_aid);
}
#endif
_os_kmem_free(phl_to_drvpriv(phl_info), msg->inbuf, msg->inlen);
}
/* release cmd_disp_eng control */
_phl_snd_free_eng(phl_info, SND_CMD_DISP_CTRL_BFEE);
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s : set aid complete\n", __func__);
}
enum rtw_phl_status
rtw_phl_snd_cmd_set_aid(void *phl,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_stainfo_t *sta,
u16 aid)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
void *d = phl_to_drvpriv(phl_info);
struct snd_cmd_set_aid *cmdbuf = NULL;
	/* acquire cmd_disp_eng control */
if (RTW_PHL_STATUS_SUCCESS !=
_phl_snd_aquire_eng(phl_info, SND_CMD_DISP_CTRL_BFEE)) {
return RTW_PHL_STATUS_FAILURE;
}
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_SOUND);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_SET_BFEE_AID);
msg.band_idx = wrole->hw_band;
attr.completion.completion = _phl_snd_cmd_post_set_aid;
attr.completion.priv = phl_info;
cmdbuf = (struct snd_cmd_set_aid *)_os_kmem_alloc(d, sizeof(struct snd_cmd_set_aid));
cmdbuf->aid = aid;
cmdbuf->sta_info = sta;
msg.inbuf = (u8 *)cmdbuf;
msg.inlen = sizeof(struct snd_cmd_set_aid);
phl_status = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if (phl_status != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "%s: Dispr send msg fail!\n",
__func__);
goto exit;
}
return phl_status;
exit:
_os_kmem_free(d, cmdbuf, sizeof(struct snd_cmd_set_aid));
/* release cmd_disp_eng control */
_phl_snd_free_eng(phl_info, SND_CMD_DISP_CTRL_BFEE);
return phl_status;
}
/**
 * For other modules, such as LPS, to set the AID directly.
**/
enum rtw_phl_status
phl_snd_cmd_set_aid_info(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_stainfo_t *sta,
u16 aid)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct phl_module_op_info op_info = {0};
struct snd_cmd_set_aid cmdbuf = {0};
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "--> %s\n", __func__);
cmdbuf.aid = aid;
cmdbuf.sta_info = sta;
op_info.op_code = SND_CMD_OP_SET_AID;
op_info.inbuf = (u8 *)&cmdbuf;
op_info.inlen = sizeof(struct snd_cmd_set_aid);
phl_status = phl_disp_eng_set_bk_module_info(phl, wrole->hw_band,
PHL_MDL_SOUND, &op_info);
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "<-- %s\n", __func__);
return phl_status;
}
#ifdef RTW_WKARD_BFEE_SET_AID
enum rtw_phl_status
_phl_cmd_snd_restore_aid(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct rtw_phl_stainfo_t *sta = NULL;
do {
if (MLME_NO_LINK == wrole->mstate)
break;
sta = rtw_phl_get_stainfo_self(phl, wrole);
if (NULL == sta)
break;
if (wrole->last_set_aid == sta->aid)
break;
phl_status = phl_snd_cmd_set_aid_info(phl, wrole, sta, sta->aid);
} while (0);
return phl_status;
}
#endif
enum rtw_phl_status
_phl_snd_cmd_ntfy_ps_enter(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
#ifdef RTW_WKARD_BFEE_SET_AID
phl->phl_com->is_in_lps = 1;
if (PHL_RTYPE_STATION == wrole->type) {
phl_status= _phl_cmd_snd_restore_aid(phl, wrole);
}
#endif
/* TODO: stop BFer period sounding if in progress */
return phl_status;
}
enum rtw_phl_status
_phl_snd_cmd_ntfy_ps_leave(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
#ifdef RTW_WKARD_BFEE_SET_AID
phl->phl_com->is_in_lps = 0;
#endif
/* TODO: restart BFer period sounding if sounding needed */
return phl_status;
}
enum rtw_phl_status
phl_snd_cmd_ntfy_ps(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole,
bool enter)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
if (true == enter)
phl_status = _phl_snd_cmd_ntfy_ps_enter(phl, wrole);
else
phl_status = _phl_snd_cmd_ntfy_ps_leave(phl, wrole);
return phl_status;
}
#endif
/* end of phl/phl_sound_cmd.c -- phl/phl_sound_cmd.h follows */
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_SOUND_CMD_H_
#define _PHL_SOUND_CMD_H_
#ifdef CONFIG_CMD_DISP
enum snd_cmd_disp_ctrl {
SND_CMD_DISP_CTRL_BFEE = 0,
SND_CMD_DISP_CTRL_BFER,
SND_CMD_DISP_CTRL_MAX
};
enum snd_cmd_set_info_opcode {
SND_CMD_OP_NONE = 0,
SND_CMD_OP_SET_AID = 1,
SND_CMD_OP_MAX = 2,
};
struct snd_cmd_set_aid {
u16 aid;
enum phl_mdl_ret_code cmd_sts;
struct rtw_phl_stainfo_t *sta_info;
};
enum rtw_phl_status phl_snd_cmd_register_module(struct phl_info_t *phl_info);
enum rtw_phl_status
rtw_phl_snd_cmd_set_aid(void *phl,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_stainfo_t *sta,
u16 aid);
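/* Usage sketch (core layer, illustration only; assumes valid phl/wrole/sta):
 *   rtw_phl_snd_cmd_set_aid(phl, wrole, sta, sta->aid);
 * The AID is applied asynchronously through the PHL_MDL_SOUND module of the
 * command dispatcher (MSG_EVT_SET_BFEE_AID). */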
enum rtw_phl_status
phl_snd_cmd_ntfy_ps(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole,
bool enter);
#endif
#endif
/* end of phl/phl_sound_cmd.h */
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
/*
* SND sub function
*/
#ifdef CONFIG_FSM
static void snd_set_timer(struct phl_sound_obj *snd, int duration, u16 event)
{
FSM_INFO(snd->fsm, "%s, duration=%d\n", __func__, duration);
phl_fsm_set_alarm(snd->fsm_obj, duration, event);
}
enum rtw_phl_status
phl_snd_upd_snd_para(struct phl_info_t *phl_info, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)phl_info->snd_obj;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct phl_snd_start_req *req = (struct phl_snd_start_req *)param;
struct phl_sound_param *snd_param = &snd->snd_param;
struct rtw_wifi_role_t *wrole = NULL;
FUNCIN_WSTS(pstatus);
snd_param->snd_func_grp_num = 0;
snd_param->grp_used_map = 0;
snd_param->snd_fail_counter = 0;
if (req != NULL) {
snd_param->m_wrole = req->wrole;
wrole = (struct rtw_wifi_role_t *)req->wrole;
snd_param->snd_proc_timeout_ms = req->proc_timeout_ms;/* ms */
snd_param->snd_dialog_token = req->dialog_token;
snd_param->snd_proc_period = req->proc_period;
snd_param->test_flag = req->test_flag;
snd_param->bypass_snd_sts_chk = req->bypass_sts_chk;
}
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"wrole->type = 0x%x \n",
wrole->type);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"timeout = %d ms\n", snd_param->snd_proc_timeout_ms);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"dialog tkn = %d\n", snd_param->snd_dialog_token);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"snd period = %d ms\n", snd_param->snd_proc_period);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"test flag = %d \n", snd_param->test_flag);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"by pass sts check = %d \n", snd_param->bypass_snd_sts_chk);
	FUNCOUT_WSTS(pstatus);
return pstatus;
}
/*
* SND state handler
*/
static int snd_fsm_func_init_st_hdl(void *obj, u16 event, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)obj;
void *d = phl_to_drvpriv(snd->phl_info);
int ret = 0;
switch (event) {
case FSM_EV_STATE_IN:
/* Pending and waiting sounding start event */
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"snd_fsm_func_init_st_hdl : SND FUNC pending and wait SND START\n");
break;
case SND_FUNC_EV_SND_START:
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"snd_fsm_func_init_st_hdl : Recv SND START Event at timestamp %d\n",
_os_get_cur_time_us());
/* SND_FUNC_EV_SND_START Shall have input param */
_os_spinlock(d, &snd->snd_lock, _bh, NULL);
snd->snd_in_progress = 1; /*TODO: Check set flag timing*/
_os_spinunlock(d, &snd->snd_lock, _bh, NULL);
phl_snd_upd_snd_para(snd->phl_info, param);
phl_fsm_state_goto(snd->fsm_obj, SND_ST_FUNC_READY);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case FSM_EV_CANCEL:
case FSM_EV_TIMER_EXPIRE:
break;
default:
break;
}
return ret;
}
static int snd_fsm_func_deinit_st_hdl(void *obj, u16 event, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)obj;
int ret = 0;
switch (event) {
case FSM_EV_STATE_IN:
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case FSM_EV_CANCEL:
case FSM_EV_TIMER_EXPIRE:
break;
default:
break;
}
return ret;
}
static int snd_fsm_func_ready_st_hdl(void *obj, u16 event, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)obj;
int ret = 0;
struct rtw_wifi_role_t *m_wrole =
(struct rtw_wifi_role_t *)snd->snd_param.m_wrole;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
switch (event) {
case FSM_EV_STATE_IN:
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"snd_fsm_func_ready_st_hdl : FSM_EV_STATE_IN\n");
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"snd_fsm_func_ready_st_hdl : (Re)Start Sounding Timestamp %d\n",
_os_get_cur_time_us());
if(NULL == m_wrole) {
snd_set_timer(snd, 0, SND_EV_TERMINATE);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"snd_fsm_func_ready_st_hdl : NULL == m_wrole\n");
break;
}
if ((PHL_RTYPE_STATION == m_wrole->type) &&
(MLME_LINKED != m_wrole->mstate)) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"snd_fsm_func_ready_st_hdl : role STA in not in linked : 0x%x\n",
m_wrole->mstate);
snd_set_timer(snd, 0, SND_EV_TERMINATE);
break;
}
snd_set_timer(snd, 10, SND_FUNC_EV_PROC_GRP);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case FSM_EV_CANCEL:
case FSM_EV_TIMER_EXPIRE:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case SND_FUNC_EV_PROC_GRP:
phl_snd_func_remove_grp_all(snd->phl_info);
/*TODO: loop for all of the wrole */
if(NULL == m_wrole) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"==> ROLE == NULL !!!\n STOP SND Function!!!\n");
snd_set_timer(snd, 10, SND_EV_TERMINATE);
break;
}
pstatus = phl_snd_func_grouping(snd->phl_info, m_wrole->id);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"==> SND_FUNC_EV_PROC_GRP FAIL !!!\n STOP SND Function!!!\n");
snd_set_timer(snd, 10, SND_EV_TERMINATE);
break;
}
if (0 == snd->snd_param.snd_func_grp_num) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"==> Available SND GRP == 0 !!!\n STOP SND Function!!!\n");
snd_set_timer(snd, 10, SND_EV_TERMINATE);
break;
}
		/* fall through */
case SND_FUNC_EV_START_SND_PROC:
pstatus = phl_snd_func_pre_config(snd->phl_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"==> SND_FUNC_EV_START_SND_PROC FAIL !!!\n STOP SND Function!!!\n");
snd_set_timer(snd, 10, SND_EV_TERMINATE);
break;
}
phl_fsm_state_goto(snd->fsm_obj, SND_ST_PROC_IDLE);
break;
case SND_EV_TERMINATE:
snd->is_terminated = true;
phl_fsm_cancel_alarm(snd->fsm_obj);
phl_fsm_state_goto(snd->fsm_obj, SND_ST_PROC_TERMINATE);
break;
default:
break;
}
return ret;
}
static int snd_fsm_func_leave_st_hdl(void *obj, u16 event, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)obj;
void *d = phl_to_drvpriv(snd->phl_info);
int ret = 0;
switch (event) {
case FSM_EV_STATE_IN:
_os_spinlock(d, &snd->snd_lock, _bh, NULL);
snd->snd_in_progress = 0;
_os_spinunlock(d, &snd->snd_lock, _bh, NULL);
phl_fsm_state_goto(snd->fsm_obj, SND_ST_FUNC_INIT);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case FSM_EV_CANCEL:
case FSM_EV_TIMER_EXPIRE:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case SND_EV_TERMINATE:
_os_spinlock(d, &snd->snd_lock, _bh, NULL);
snd->is_terminated = 1;
snd->snd_in_progress = 0;
_os_spinunlock(d, &snd->snd_lock, _bh, NULL);
phl_fsm_state_goto(snd->fsm_obj, SND_ST_FUNC_INIT);
break;
default:
break;
}
return ret;
}
static int snd_fsm_proc_idle_st_hdl(void *obj, u16 event, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)obj;
struct phl_sound_param *snd_param = &snd->snd_param;
int ret = 0;
struct phl_snd_grp *grp = NULL;
switch (event) {
case FSM_EV_STATE_IN:
		/* fall through */
case SND_PROC_EV_IDLE_FREE_PRE_RES :
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"pre_proc_grp_idx : %d ; cur_proc_grp_idx : %d \n",
snd_param->pre_proc_grp_idx, snd_param->cur_proc_grp_idx);
if (snd_param->cur_proc_grp_idx !=
snd_param->pre_proc_grp_idx) {
grp = phl_snd_get_grp_byidx(snd->phl_info,
snd_param->pre_proc_grp_idx);
if (grp != NULL) {
phl_snd_proc_chk_prev_grp(snd->phl_info, grp);
}
}
		/* fall through */
case SND_PROC_EV_IDLE_GET_SND_GRP :
if(MAX_SND_GRP_NUM == snd_param->cur_proc_grp_idx) {
snd_set_timer(snd, 10, SND_EV_TERMINATE);
break;
}
grp = phl_snd_get_grp_byidx(snd->phl_info,
snd_param->cur_proc_grp_idx);
/* TODO: Check Grp STA is still available */
/* phl_snd_check_condition() */
if (NULL == grp) {
snd_set_timer(snd, 0, SND_EV_TERMINATE);
break;
}
phl_fsm_state_goto(snd->fsm_obj, SND_ST_PROC_BUSY); /* For SND FSM LOOP Test */
break;
case FSM_EV_CANCEL:
case FSM_EV_TIMER_EXPIRE:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case SND_EV_TERMINATE:
snd->is_terminated = true;
phl_fsm_state_goto(snd->fsm_obj, SND_ST_PROC_TERMINATE);
break;
default:
break;
}
return ret;
}
static int snd_fsm_proc_busy_st_hdl(void *obj, u16 event, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)obj;
struct phl_sound_param *snd_param = &snd->snd_param;
int ret = 0;
struct phl_snd_grp *grp = NULL;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
u8 sta_num = 0;
struct rtw_wifi_role_t *wrole =
(struct rtw_wifi_role_t *)snd->snd_param.m_wrole;
switch (event) {
case FSM_EV_STATE_IN:
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"SND PROC BUSY : Process Grp idx = %d\n",
snd_param->cur_proc_grp_idx);
grp = phl_snd_get_grp_byidx(snd->phl_info,
snd_param->cur_proc_grp_idx);
if (NULL == grp) {
snd_set_timer(snd, 0, SND_PROC_EV_BUSY_SND_DOWN);
break;
}
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"SND_PROC_BUSY : grp sta number = %d \n",
grp->num_sta);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"SND_PROC_BUSY : grp snd_fb_t = 0x%x \n",
grp->sta[0].snd_fb_t);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"SND_PROC_BUSY : grp primary sta macid = 0x%x \n",
grp->sta[0].macid);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"SND_PROC_BUSY : grp sta-1 macid = 0x%x \n",
grp->sta[1].macid);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"SND_PROC_BUSY : grp sta-2 macid = 0x%x \n",
grp->sta[2].macid);
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"SND_PROC_BUSY : grp sta-3 macid = 0x%x \n",
grp->sta[3].macid);
/* fall through */
case SND_PROC_EV_BUSY_GET_BF_RES:
pstatus = phl_snd_proc_get_res(snd->phl_info, grp, &sta_num);
if ((sta_num == 0) || (RTW_PHL_STATUS_SUCCESS != pstatus)) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "SND_PROC_BUSY : sta_num = 0 !!! \n");
grp->snd_sts = PHL_SND_STS_FAILURE;
snd_set_timer(snd, 0, SND_PROC_EV_BUSY_SND_DOWN);
break;
}
		/* fall through */
case SND_PROC_EV_BUSY_PRE_CFG:
pstatus = phl_snd_proc_precfg(snd->phl_info, grp);
		/* fall through */
case SND_PROC_EV_BUSY_TRIG_SND:
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"[SND] Dialog token = 0x%x\n",
snd->snd_param.snd_dialog_token);
pstatus = phl_snd_proc_start_sounding_fw(snd->phl_info, grp);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"phl_snd_proc_start_sounding_fw FAIL\n");
grp->snd_sts = PHL_SND_STS_FAILURE;
if((grp->snd_type == PHL_SND_TYPE_HE_HW) ||
(grp->snd_type == PHL_SND_TYPE_VHT_HW)) {
phl_snd_mac_ctrl(snd->phl_info, wrole, 1);
}
			/* TODO: FW / halmac might have crashed; stop the sounding fsm module */
snd_set_timer(snd, 0, SND_EV_TERMINATE);
}
snd->snd_param.snd_dialog_token++;
snd_set_timer(snd, 10, SND_PROC_EV_BUSY_REL_SND_CTRL);
break;
case SND_PROC_EV_BUSY_REL_SND_CTRL :
grp = phl_snd_get_grp_byidx(snd->phl_info,
snd_param->cur_proc_grp_idx);
if (NULL == grp) {
snd_set_timer(snd, 0, SND_PROC_EV_BUSY_SND_DOWN);
break;
}
if((grp->snd_type == PHL_SND_TYPE_HE_HW) ||
(grp->snd_type == PHL_SND_TYPE_VHT_HW)) {
phl_snd_mac_ctrl(snd->phl_info, wrole, 1);
}
snd_set_timer(snd, snd->snd_param.snd_proc_timeout_ms,
SND_PROC_EV_BUSY_SND_DOWN);
break;
case SND_PROC_EV_BUSY_SND_DOWN:
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"SND_PROC_BUSY: Recv C2H or Timeout , Switch to PROC DOWN\n");
grp = phl_snd_get_grp_byidx(snd->phl_info,
snd_param->cur_proc_grp_idx);
if (NULL != grp)
phl_snd_polling_pri_sta_sts(snd->phl_info, grp);
phl_fsm_state_goto(snd->fsm_obj, SND_ST_PROC_DOWN); /* For SND FSM Periodic LOOP Test */
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case FSM_EV_CANCEL:
case FSM_EV_TIMER_EXPIRE:
break;
case SND_EV_TERMINATE:
snd->is_terminated = true;
phl_fsm_state_goto(snd->fsm_obj, SND_ST_PROC_TERMINATE);
break;
default:
break;
}
return ret;
}
static int snd_fsm_proc_down_st_hdl(void *obj, u16 event, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)obj;
int ret = 0;
struct phl_sound_param *snd_param = &snd->snd_param;
struct phl_snd_grp *grp = NULL;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
switch (event) {
case FSM_EV_STATE_IN:
grp = phl_snd_get_grp_byidx(snd->phl_info,
snd_param->cur_proc_grp_idx);
if (NULL == grp) {
snd_set_timer(snd, snd_param->snd_proc_period,
SND_PROC_EV_DOWN_SND_END);
break;
}
if (PHL_SND_STS_FAILURE == grp->snd_sts) {
snd_set_timer(snd, snd_param->snd_proc_period,
SND_PROC_EV_DOWN_SND_END);
}
		/* fall through */
case SND_PROC_EV_DOWN_POST_CFG:
pstatus = phl_snd_proc_postcfg(snd->phl_info, grp);
if (pstatus == RTW_PHL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"POST CONFIG SUCCESS!!!!\n");
}
		/* fall through */
case SND_PROC_EV_DOWN_SND_END:
/* Check Test Mode */
if (snd_param->test_flag&PHL_SND_TEST_F_ONE_TIME) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"TEST MODE : Set EV to TERMINATE by test_flag\n");
snd_set_timer(snd, snd_param->snd_proc_period,
SND_EV_TERMINATE);
break;
}
		/* TODO: check whether the next sounding is needed;
		   if any STA disassociates or a new STA associates, re-grouping may be needed */
grp = phl_snd_get_grp_byidx(snd->phl_info,
snd_param->cur_proc_grp_idx);
		if (grp == NULL) {
			snd_set_timer(snd, snd_param->snd_proc_period,
				      SND_EV_TERMINATE);
			break;
		}
if (RTW_PHL_STATUS_SUCCESS ==
phl_snd_proc_chk_condition(snd->phl_info, grp)) {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"Status : Linked ; Set Timer for Next Sounding \n");
snd_set_timer(snd, snd_param->snd_proc_period,
SND_PROC_EV_DOWN_NEXT_SND);
} else {
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"Status : ERROR ; Set Timer to Terminate \n");
snd_set_timer(snd, snd_param->snd_proc_period,
SND_EV_TERMINATE);
}
break;
case FSM_EV_CANCEL:
case FSM_EV_TIMER_EXPIRE:
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case SND_PROC_EV_DOWN_NEXT_SND:
snd_param->pre_proc_grp_idx = snd_param->cur_proc_grp_idx;
snd_param->cur_proc_grp_idx++;
if (snd_param->cur_proc_grp_idx >=
snd_param->snd_func_grp_num) {
/* start sounding process from group list head or terminated */
if (snd->is_terminated)
snd_param->cur_proc_grp_idx = MAX_SND_GRP_NUM;
else
snd_param->cur_proc_grp_idx = 0;
}
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"Next Sounding Group Idx = %d",
snd_param->cur_proc_grp_idx);
phl_fsm_state_goto(snd->fsm_obj, SND_ST_PROC_IDLE);
break;
case SND_EV_TERMINATE:
snd->is_terminated = true;
phl_fsm_state_goto(snd->fsm_obj, SND_ST_PROC_TERMINATE);
break;
default:
break;
}
return ret;
}
static int snd_fsm_proc_terminiate_st_hdl(void *obj, u16 event, void *param)
{
struct phl_sound_obj *snd = (struct phl_sound_obj *)obj;
int ret = 0;
switch (event) {
case FSM_EV_STATE_IN:
/* Free all sounding resource and goto FUNC Leave */
phl_snd_func_remove_grp_all(snd->phl_info);
phl_fsm_state_goto(snd->fsm_obj, SND_ST_FUNC_LEAVE);
break;
case FSM_EV_CANCEL:
case FSM_EV_TIMER_EXPIRE:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
case FSM_EV_STATE_OUT:
phl_fsm_cancel_alarm(snd->fsm_obj);
break;
default:
break;
}
return ret;
}
static void snd_dump_obj(void *obj, char *s, int *sz)
{
/* nothing to do for now */
}
static void snd_dump_fsm(void *fsm, char *s, int *sz)
{
/* nothing to do for now */
}
/* STATE table */
static struct fsm_state_ent snd_state_tbl[] = {
ST_ENT(SND_ST_FUNC_INIT, snd_fsm_func_init_st_hdl),
ST_ENT(SND_ST_FUNC_DEINIT, snd_fsm_func_deinit_st_hdl),
ST_ENT(SND_ST_FUNC_READY, snd_fsm_func_ready_st_hdl),
ST_ENT(SND_ST_FUNC_LEAVE, snd_fsm_func_leave_st_hdl),
ST_ENT(SND_ST_PROC_IDLE, snd_fsm_proc_idle_st_hdl),
ST_ENT(SND_ST_PROC_BUSY, snd_fsm_proc_busy_st_hdl),
ST_ENT(SND_ST_PROC_DOWN, snd_fsm_proc_down_st_hdl),
ST_ENT(SND_ST_PROC_TERMINATE,snd_fsm_proc_terminiate_st_hdl)
};
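/*
 * Nominal state flow (as implemented by the handlers above):
 * FUNC_INIT -> FUNC_READY -> PROC_IDLE -> PROC_BUSY -> PROC_DOWN
 *   -> PROC_IDLE (next group) ... -> PROC_TERMINATE -> FUNC_LEAVE -> FUNC_INIT
 */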
/* EVENT table */
static struct fsm_event_ent snd_event_tbl[] = {
EV_ENT(SND_FUNC_EV_INIT),
EV_ENT(SND_FUNC_EV_DEINIT),
EV_ENT(SND_FUNC_EV_SND_START),
EV_ENT(SND_FUNC_EV_NOTIFY_PENDING_REQ),
EV_ENT(SND_FUNC_EV_CHK_SND_STS),
EV_ENT(SND_FUNC_EV_PROC_GRP),
EV_ENT(SND_FUNC_EV_START_SND_PROC),
EV_ENT(SND_FUNC_EV_END_SND),
/*PROC IDLE*/
EV_ENT(SND_PROC_EV_IDLE_GET_SND_GRP),
/*PROC BUSY*/
EV_ENT(SND_PROC_EV_BUSY_GET_BF_RES),
EV_ENT(SND_PROC_EV_IDLE_FREE_PRE_RES),
EV_ENT(SND_PROC_EV_BUSY_PRE_CFG),
EV_ENT(SND_PROC_EV_BUSY_TRIG_SND),
EV_ENT(SND_PROC_EV_BUSY_REL_SND_CTRL),
EV_ENT(SND_PROC_EV_BUSY_POLLING_STS),
EV_ENT(SND_PROC_EV_BUSY_SND_DOWN),
/*PROC DOWN*/
EV_ENT(SND_PROC_EV_DOWN_POST_CFG),
EV_ENT(SND_PROC_EV_DOWN_SND_END),
EV_ENT(SND_PROC_EV_DOWN_NEXT_SND),
/*COMMON*/
EV_ENT(SND_EV_TERMINATE),
EV_ENT(SND_EV_DONOTHING),
EV_ENT(SND_EV_MAX),
};
/* For EXTERNAL application to create a SND FSM */
/* @root: FSM root structure
* @phl_info: private data structure to invoke hal/phl function
*
* return
* fsm_main: FSM main structure (Do NOT expose)
*/
struct fsm_main *phl_sound_new_fsm(struct fsm_root *root,
struct phl_info_t *phl_info)
{
void *d = phl_to_drvpriv(phl_info);
struct fsm_main *fsm = NULL;
struct rtw_phl_fsm_tb tb;
_os_mem_set(d, &tb, 0, sizeof(tb));
tb.state_tbl = snd_state_tbl;
tb.max_state = sizeof(snd_state_tbl)/sizeof(snd_state_tbl[0]);
tb.max_event = sizeof(snd_event_tbl)/sizeof(snd_event_tbl[0]);
tb.evt_tbl = snd_event_tbl;
tb.dump_obj = snd_dump_obj;
tb.dump_fsm = snd_dump_fsm;
tb.dbg_level = FSM_DBG_INFO;
tb.evt_level = FSM_DBG_INFO;
fsm = phl_fsm_init_fsm(root, "sound", phl_info, &tb);
return fsm;
}
/* For EXTERNAL application to destroy the SND FSM */
/* @fsm: see fsm_main
*/
void phl_snd_destory_fsm(struct fsm_main *fsm)
{
if (fsm == NULL)
return;
/* deinit fsm local variables, if any */
/* ask the FSM framework to deinit the fsm */
phl_fsm_deinit_fsm(fsm);
}
/* For EXTERNAL application to destroy the sound object */
/* @snd: locally created sound object
*
*/
void phl_snd_destory_obj(struct phl_sound_obj *snd)
{
	void *d = NULL;
	if (snd == NULL)
		return;
	d = phl_to_drvpriv(snd->phl_info);
	/* deinit obj local variables, if any */
	_os_spinlock_free(d, &snd->snd_lock);
	_os_spinlock_free(d, &snd->cmd_lock);
	/* inform the FSM framework to recycle fsm_obj */
phl_fsm_destory_obj(snd->fsm_obj);
}
/* For EXTERNAL interrupt handler to send event into snd fsm (expose) */
enum rtw_phl_status phl_snd_fsm_ev_c2h_snd_down(void *phl)
{
enum RTW_PHL_SND_NOTIFY_EVENT notify = RTW_PHL_SND_C2H_SND_DOWN;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"phl_snd_event_c2h_snd_down notify 0x%x\n", notify);
return phl_snd_fsm_send_msg(phl, notify, NULL, 0);
}
enum rtw_phl_status
phl_snd_fsm_ev_start_func(void *phl, void *req)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *d = phl_to_drvpriv(phl_info);
struct phl_snd_start_req *snd_req = NULL;//(struct phl_snd_start_req *)req;
enum RTW_PHL_SND_NOTIFY_EVENT notify = RTW_PHL_SND_START;
do {
snd_req = (struct phl_snd_start_req *)_os_kmem_alloc(d,
sizeof(*snd_req));
if (snd_req == NULL)
break;
_os_mem_cpy(d, snd_req, req, sizeof(*snd_req));
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"phl_snd_event_start_func notify 0x%x\n", notify);
status = phl_snd_fsm_send_msg(phl, notify, snd_req,
sizeof(*snd_req));
_os_kmem_free(d, snd_req, sizeof(*snd_req));
} while (0);
return status;
}
enum rtw_phl_status phl_snd_fsm_ev_abort(void *phl)
{
enum RTW_PHL_SND_NOTIFY_EVENT notify = RTW_PHL_SND_ABORT;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"phl_snd_event_abort notify 0x%x\n", notify);
return phl_snd_fsm_send_msg(phl, notify, NULL, 0);
}
/* @phl: phl_info_t
* @notify: event to snd fsm
*/
enum rtw_phl_status
phl_snd_fsm_send_msg(void *phl, enum RTW_PHL_SND_NOTIFY_EVENT notify,
void *buf, u32 buf_sz)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_sound_obj *snd = phl_info->snd_obj;
u16 event;
switch (notify) {
case RTW_PHL_SND_START: /* Start Sound Func */
event = SND_FUNC_EV_SND_START;
break;
case RTW_PHL_SND_ABORT: /* Go Terminate*/
event = SND_EV_TERMINATE;
break;
case RTW_PHL_SND_C2H_SND_DOWN: /* Sound Down Configuration */
event = SND_PROC_EV_BUSY_SND_DOWN;
break;
default:
return RTW_PHL_STATUS_FAILURE;
}
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_,
"phl_snd_send_msg event %d\n",
event);
return phl_fsm_gen_msg(phl, snd->fsm_obj, buf, buf_sz, event);
}
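/*
 * phl_snd_fsm_send_msg() translates an external notify id into the
 * matching FSM event and queues it through phl_fsm_gen_msg(). Callers
 * outside this file normally use one of the thin wrappers above
 * instead of calling it directly, e.g. (illustrative):
 *
 *	// C2H interrupt context: hardware reports the sounding sequence done
 *	phl_snd_fsm_ev_c2h_snd_down(phl);
 *	// or abort the whole sounding function
 *	phl_snd_fsm_ev_abort(phl);
 */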
/* For EXTERNAL application notify from upper layer*/
void phl_snd_notify_from_upper_watchdog_status(void *phl, bool inprogress)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_sound_obj *snd = NULL;
snd = phl_info->snd_obj;
PHL_TRACE(COMP_PHL_SOUND, _PHL_INFO_, "is snd in progress : %d \n",
snd->snd_in_progress);
/*TODO:*/
}
#endif /*CONFIG_FSM*/
|
2301_81045437/rtl8852be
|
phl/phl_sound_fsm.c
|
C
|
agpl-3.0
| 21,786
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_SOUND_FSM_H__
#define __PHL_SOUND_FSM_H__
struct phl_sound_obj;
enum RTW_PHL_SND_NOTIFY_EVENT {
RTW_PHL_SND_START,
RTW_PHL_SND_ABORT,
RTW_PHL_SND_C2H_SND_DOWN /* C2H Sounding Down */
};
enum SND_EV_ID {
SND_FUNC_EV_INIT,
SND_FUNC_EV_DEINIT,
SND_FUNC_EV_SND_START,
SND_FUNC_EV_NOTIFY_PENDING_REQ,
SND_FUNC_EV_CHK_SND_STS,
SND_FUNC_EV_PROC_GRP,
SND_FUNC_EV_START_SND_PROC,
SND_FUNC_EV_END_SND,
SND_PROC_EV_IDLE_GET_SND_GRP,
SND_PROC_EV_IDLE_FREE_PRE_RES,
SND_PROC_EV_BUSY_GET_BF_RES,
SND_PROC_EV_BUSY_PRE_CFG,
SND_PROC_EV_BUSY_TRIG_SND,
SND_PROC_EV_BUSY_REL_SND_CTRL,
SND_PROC_EV_BUSY_POLLING_STS,
SND_PROC_EV_BUSY_SND_DOWN,
SND_PROC_EV_DOWN_POST_CFG,
SND_PROC_EV_DOWN_SND_END,
SND_PROC_EV_DOWN_NEXT_SND,
SND_EV_TERMINATE,
SND_EV_DONOTHING,
SND_EV_MAX
};
enum SND_STATE_ST {
/* Initialize and de-initialize */
SND_ST_FUNC_INIT,
SND_ST_FUNC_DEINIT,
/* FUNC MAIN STATE */
SND_ST_FUNC_READY,
SND_ST_FUNC_LEAVE,
/* SND PROC - periodic */
SND_ST_PROC_IDLE,
SND_ST_PROC_BUSY,
SND_ST_PROC_DOWN,
SND_ST_PROC_TERMINATE
};
struct phl_snd_start_req {
void *wrole;
u8 dialog_token;
u8 proc_timeout_ms;
u8 proc_period;
bool bypass_sts_chk;
u8 test_flag;
};
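/*
 * A core-layer caller starts the sounding FSM by filling a
 * phl_snd_start_req and handing it to phl_snd_fsm_ev_start_func(),
 * which copies the request before queuing the event. The field values
 * below are purely illustrative:
 *
 *	struct phl_snd_start_req req = {0};
 *	req.wrole = wrole;		// requesting wifi role
 *	req.proc_period = 100;		// sounding period (driver units)
 *	req.bypass_sts_chk = false;
 *	phl_snd_fsm_ev_start_func(phl, &req);
 */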
struct fsm_main *phl_sound_new_fsm(struct fsm_root *root,
struct phl_info_t *phl_info);
void phl_snd_destory_fsm(struct fsm_main *fsm);
enum rtw_phl_status phl_snd_fsm_ev_c2h_snd_down(void *phl);
void phl_snd_destory_obj(struct phl_sound_obj *snd);
enum rtw_phl_status phl_snd_fsm_ev_abort(void *phl);
enum rtw_phl_status
phl_snd_fsm_ev_start_func(void *phl, void *req);
enum rtw_phl_status
phl_snd_fsm_send_msg(void *phl, enum RTW_PHL_SND_NOTIFY_EVENT notify,
void *buf, u32 buf_sz);
u8 phl_snd_is_inprogress(void *phl);
#endif
|
2301_81045437/rtl8852be
|
phl/phl_sound_fsm.h
|
C
|
agpl-3.0
| 2,405
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_STA_C_
#include "phl_headers.h"
/*********** macid ctrl section ***********/
enum rtw_phl_status
phl_macid_ctrl_init(struct phl_info_t *phl)
{
struct rtw_phl_com_t *phl_com = phl->phl_com;
struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_com);
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl);
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
u8 i = 0;
/* check invalid value or not */
if (hal_spec->macid_num == 0) {
PHL_ERR("Cannot get macid_num of hal\n");
goto exit;
}
_os_spinlock_init(phl_to_drvpriv(phl), &macid_ctl->lock);
macid_ctl->max_num = MIN(hal_spec->macid_num, PHL_MACID_MAX_NUM);
PHL_INFO("%s macid max_num:%d\n", __func__, macid_ctl->max_num);
for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++)
macid_ctl->wrole_bmc[i] = macid_ctl->max_num;
phl_status = RTW_PHL_STATUS_SUCCESS;
exit:
return phl_status;
}
enum rtw_phl_status
phl_macid_ctrl_deinit(struct phl_info_t *phl)
{
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl);
_os_spinlock_free(phl_to_drvpriv(phl), &macid_ctl->lock);
macid_ctl->max_num = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static u8
_phl_macid_is_used(u32 *map, const u16 id)
{
int map_idx = (int)id / 32;
if (map[map_idx] & BIT(id % 32))
return true;
else
return false;
}
static void
_phl_macid_map_set(u32 *map, const u16 id)
{
int map_idx = (int)id / 32;
map[map_idx] |= BIT(id % 32);
}
static void
_phl_macid_map_clr(u32 *map, const u16 id)
{
int map_idx = (int)id / 32;
map[map_idx] &= ~BIT(id % 32);
}
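/*
 * The macid maps are arrays of u32 words used as one flat bitmap:
 * macid / 32 selects the word and BIT(macid % 32) selects the bit.
 * For example, macid 35 lives in map[1], bit 3:
 *
 *	_phl_macid_map_set(map, 35);	// map[1] |= BIT(3)
 *	_phl_macid_is_used(map, 35);	// -> true
 *	_phl_macid_map_clr(map, 35);	// map[1] &= ~BIT(3)
 */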
static void _phl_wrole_bcmc_id_set(struct macid_ctl_t *macid_ctl,
struct rtw_wifi_role_t *wrole, const u16 id)
{
macid_ctl->wrole_bmc[wrole->id] = id;
}
static enum rtw_phl_status
_phl_alloc_macid(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *phl_sta)
{
struct macid_ctl_t *mc = phl_to_mac_ctrl(phl_info);
struct rtw_wifi_role_t *wrole = phl_sta->wrole;
u8 bc_addr[MAC_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u16 mid = 0;
u16 max_macid_num = 0;
bool bmc_sta = false;
if (wrole == NULL) {
PHL_ERR("%s wrole=NULL!\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
if (_os_mem_cmp(phl_to_drvpriv(phl_info),
bc_addr, phl_sta->mac_addr, MAC_ALEN) == 0)
bmc_sta = true;
/* TODO
if (wrole->type == PHL_RTYPE_STATION)
else if (wrole->type == PHL_RTYPE_AP)*/
/*TODO - struct mac_ax_hw_info-> u16 macid_num; need to check */
max_macid_num = mc->max_num;
_os_spinlock(phl_to_drvpriv(phl_info), &mc->lock, _bh, NULL);
for(mid = 0; mid < max_macid_num; mid++) {
if (!_phl_macid_is_used(mc->used_map, mid)) {
_phl_macid_map_set(mc->used_map, mid);
_phl_macid_map_set(&mc->wifi_role_usedmap[wrole->id][0], mid);
mc->sta[mid] = phl_sta;
if (bmc_sta) {
_phl_macid_map_set(mc->bmc_map, mid);
_phl_wrole_bcmc_id_set(mc, wrole, mid);
}
break;
}
}
_os_spinunlock(phl_to_drvpriv(phl_info), &mc->lock, _bh, NULL);
if (mid == max_macid_num) {
phl_sta->macid = max_macid_num;
PHL_ERR("%s cannot get macid\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
phl_sta->macid = mid;
PHL_INFO("%s allocate %02x:%02x:%02x:%02x:%02x:%02x for macid:%u\n", __func__,
phl_sta->mac_addr[0], phl_sta->mac_addr[1], phl_sta->mac_addr[2],
phl_sta->mac_addr[3], phl_sta->mac_addr[4], phl_sta->mac_addr[5],
phl_sta->macid);
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_phl_release_macid(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *phl_sta)
{
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct rtw_wifi_role_t *wrole = phl_sta->wrole;
u8 bc_addr[MAC_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u16 invalid_macid = macid_ctl->max_num;
if (phl_sta->macid >= invalid_macid) {
PHL_ERR("_phl_release_macid macid error (%d\n)", phl_sta->macid);
phl_status = RTW_PHL_STATUS_FAILURE;
goto exit;
}
_os_spinlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
if (!_phl_macid_is_used(macid_ctl->used_map, phl_sta->macid)) {
PHL_WARN("_phl_release_macid macid unused (%d\n)", phl_sta->macid);
_os_spinunlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
phl_status = RTW_PHL_STATUS_FAILURE;
goto exit;
}
_phl_macid_map_clr(macid_ctl->used_map, phl_sta->macid);
_phl_macid_map_clr(&macid_ctl->wifi_role_usedmap[wrole->id][0], phl_sta->macid);
macid_ctl->sta[phl_sta->macid] = NULL;
if (_os_mem_cmp(phl_to_drvpriv(phl_info),
bc_addr, phl_sta->mac_addr, MAC_ALEN) == 0)
_phl_macid_map_clr(macid_ctl->bmc_map, phl_sta->macid);
phl_status = RTW_PHL_STATUS_SUCCESS;
_os_spinunlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
exit:
PHL_INFO("%s release macid:%d - %02x:%02x:%02x:%02x:%02x:%02x \n",
__func__,
phl_sta->macid,
phl_sta->mac_addr[0], phl_sta->mac_addr[1], phl_sta->mac_addr[2],
phl_sta->mac_addr[3], phl_sta->mac_addr[4], phl_sta->mac_addr[5]);
phl_sta->macid = invalid_macid;
return phl_status;
}
u16 _phl_get_macid(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *phl_sta)
{
/* TODO: macid management */
return phl_sta->macid;
}
/**
* This function export to core layer use
* to get phl role bmc macid
* @phl: see phl_info_t
* @wrole: wifi role
*/
u16
rtw_phl_wrole_bcmc_id_get(void *phl, struct rtw_wifi_role_t *wrole)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
return macid_ctl->wrole_bmc[wrole->id];
}
/**
* This function export to core layer use
* to get maximum macid number
* @phl: see phl_info_t
*/
u16
rtw_phl_get_macid_max_num(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
return macid_ctl->max_num;
}
/**
* This function export to core layer use
* to check macid is bmc or not
* @phl: see phl_info_t
* @macid: macid
*/
u8
rtw_phl_macid_is_bmc(void *phl, u16 macid)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
if (macid >= macid_ctl->max_num) {
PHL_ERR("%s macid(%d) is invalid\n", __func__, macid);
return true;
}
return _phl_macid_is_used(macid_ctl->bmc_map, macid);
}
/**
* This function export to core layer use
* to check macid is used or not
* @phl: see phl_info_t
* @macid: macid
*/
u8
rtw_phl_macid_is_used(void *phl, u16 macid)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
if (macid >= macid_ctl->max_num) {
PHL_ERR("%s macid(%d) is invalid\n", __func__, macid);
return true;
}
return _phl_macid_is_used(macid_ctl->used_map, macid);
}
/**
* This function is used to
* check macid shared by all wifi role
* @phl: see phl_info_t
* @macid: macid
*/
u8
rtw_phl_macid_is_wrole_shared(void *phl, u16 macid)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
int i = 0;
u8 iface_bmp = 0;
if (macid >= macid_ctl->max_num) {
PHL_ERR("%s macid(%d) is invalid\n", __func__, macid);
return false;
}
for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
if (_phl_macid_is_used(&macid_ctl->wifi_role_usedmap[i][0], macid)) {
if (iface_bmp)
return true;
iface_bmp |= BIT(i);
}
}
return false;
}
/**
* This function is used to
* check macid not shared by all wifi role
* and belong to wifi role
* @phl: see phl_info_t
* @macid: macid
* @wrole: check id belong to this wifi role
*/
u8
rtw_phl_macid_is_wrole_specific(void *phl,
u16 macid, struct rtw_wifi_role_t *wrole)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
int i = 0;
u8 iface_bmp = 0;
if (macid >= macid_ctl->max_num) {
PHL_ERR("%s macid(%d) invalid\n", __func__, macid);
return false;
}
for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
if (_phl_macid_is_used(&macid_ctl->wifi_role_usedmap[i][0], macid)) {
if (iface_bmp || i != wrole->id)
return false;
iface_bmp |= BIT(i);
}
}
return iface_bmp ? true : false;
}
/*********** stainfo_ctrl section ***********/
static enum rtw_phl_status
_phl_stainfo_init(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *phl_sta)
{
void *drv = phl_to_drvpriv(phl_info);
INIT_LIST_HEAD(&phl_sta->list);
_os_spinlock_init(drv, &phl_sta->tid_rx_lock);
_os_mem_set(drv, phl_sta->tid_rx, 0, sizeof(phl_sta->tid_rx));
_os_event_init(drv, &phl_sta->comp_sync);
_os_init_timer(drv, &phl_sta->reorder_timer,
phl_sta_rx_reorder_timer_expired, phl_sta, "reorder_timer");
_os_atomic_set(drv, &phl_sta->ps_sta, 0);
if (rtw_hal_stainfo_init(phl_info->hal, phl_sta) !=
RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("hal_stainfo_init failed\n");
FUNCOUT();
return RTW_PHL_STATUS_FAILURE;
}
phl_sta->active = false;
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
_phl_stainfo_deinit(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *phl_sta)
{
void *drv = phl_to_drvpriv(phl_info);
_os_release_timer(drv, &phl_sta->reorder_timer);
_os_spinlock_free(phl_to_drvpriv(phl_info), &phl_sta->tid_rx_lock);
_os_event_free(drv, &phl_sta->comp_sync);
if (rtw_hal_stainfo_deinit(phl_info->hal, phl_sta)!=
RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("hal_stainfo_deinit failed\n");
FUNCOUT();
return RTW_PHL_STATUS_FAILURE;
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_stainfo_enqueue(struct phl_info_t *phl_info,
struct phl_queue *sta_queue,
struct rtw_phl_stainfo_t *psta)
{
void *drv = phl_to_drvpriv(phl_info);
if (!psta)
return RTW_PHL_STATUS_FAILURE;
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
list_add_tail(&psta->list, &sta_queue->queue);
sta_queue->cnt++;
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
return RTW_PHL_STATUS_SUCCESS;
}
struct rtw_phl_stainfo_t *
phl_stainfo_dequeue(struct phl_info_t *phl_info,
struct phl_queue *sta_queue)
{
struct rtw_phl_stainfo_t *psta = NULL;
void *drv = phl_to_drvpriv(phl_info);
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
if (list_empty(&sta_queue->queue)) {
psta = NULL;
} else {
psta = list_first_entry(&sta_queue->queue,
struct rtw_phl_stainfo_t, list);
list_del(&psta->list);
sta_queue->cnt--;
}
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
return psta;
}
enum rtw_phl_status
phl_stainfo_queue_del(struct phl_info_t *phl_info,
struct phl_queue *sta_queue,
struct rtw_phl_stainfo_t *psta)
{
void *drv = phl_to_drvpriv(phl_info);
if (!psta)
return RTW_PHL_STATUS_FAILURE;
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
if (sta_queue->cnt) {
list_del(&psta->list);
sta_queue->cnt--;
}
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
return RTW_PHL_STATUS_SUCCESS;
}
struct rtw_phl_stainfo_t *
phl_stainfo_queue_search(struct phl_info_t *phl_info,
struct phl_queue *sta_queue,
u8 *addr)
{
struct rtw_phl_stainfo_t *sta = NULL;
_os_list *sta_list = &sta_queue->queue;
void *drv = phl_to_drvpriv(phl_info);
bool sta_found = false;
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
if (list_empty(sta_list) == true)
goto _exit;
phl_list_for_loop(sta, struct rtw_phl_stainfo_t, sta_list, list) {
if (_os_mem_cmp(phl_to_drvpriv(phl_info),
sta->mac_addr, addr, MAC_ALEN) == 0) {
sta_found = true;
break;
}
}
_exit:
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
if (sta_found == false)
sta = NULL;
return sta;
}
struct rtw_phl_stainfo_t *
phl_stainfo_queue_get_first(struct phl_info_t *phl_info,
struct phl_queue *sta_queue)
{
_os_list *sta_list = &sta_queue->queue;
void *drv = phl_to_drvpriv(phl_info);
struct rtw_phl_stainfo_t *sta = NULL;
/* the first sta info in assoc_sta_queue is the self sta info */
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
if (list_empty(sta_list) == true)
goto _exit;
sta = list_first_entry(sta_list, struct rtw_phl_stainfo_t, list);
_exit :
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
return sta;
}
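/*
 * The stainfo queue helpers above take the queue spinlock internally,
 * so a typical per-role lookup needs no extra locking. Illustrative
 * usage, with peer_addr being the peer MAC address to match:
 *
 *	struct rtw_phl_stainfo_t *sta =
 *		phl_stainfo_queue_search(phl_info,
 *					 &wrole->assoc_sta_queue, peer_addr);
 *	if (sta == NULL)
 *		sta = phl_stainfo_queue_get_first(phl_info,
 *						  &wrole->assoc_sta_queue);
 */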
enum rtw_phl_status
phl_stainfo_ctrl_deinie(struct phl_info_t *phl_info)
{
struct stainfo_ctl_t *sta_ctrl = phl_to_sta_ctrl(phl_info);
void *drv = phl_to_drvpriv(phl_info);
struct rtw_phl_stainfo_t *psta = NULL;
struct phl_queue *fsta_queue = &sta_ctrl->free_sta_queue;
FUNCIN();
do {
psta = phl_stainfo_dequeue(phl_info, fsta_queue);
if (psta)
_phl_stainfo_deinit(phl_info, psta);
}while (psta != NULL);
pq_deinit(drv, fsta_queue);
if (sta_ctrl->allocated_stainfo_buf)
_os_mem_free(drv, sta_ctrl->allocated_stainfo_buf,
sta_ctrl->allocated_stainfo_sz);
FUNCOUT();
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_stainfo_ctrl_init(struct phl_info_t *phl_info)
{
struct stainfo_ctl_t *sta_ctrl = phl_to_sta_ctrl(phl_info);
void *drv = phl_to_drvpriv(phl_info);
struct rtw_phl_stainfo_t *psta = NULL;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_queue *fsta_queue = NULL;
u16 i;
bool sta_init_fail = false;
FUNCIN();
sta_ctrl->phl_info = phl_info;
sta_ctrl->allocated_stainfo_sz = sizeof(struct rtw_phl_stainfo_t) * PHL_MAX_STA_NUM;
#ifdef MEM_ALIGNMENT
sta_ctrl->allocated_stainfo_sz += MEM_ALIGNMENT_OFFSET;
#endif
sta_ctrl->allocated_stainfo_buf =
_os_mem_alloc(drv, sta_ctrl->allocated_stainfo_sz);
if (!sta_ctrl->allocated_stainfo_buf) {
PHL_ERR("allocate stainfo buf failed\n");
goto _exit;
}
sta_ctrl->stainfo_buf = sta_ctrl->allocated_stainfo_buf;
#ifdef MEM_ALIGNMENT
if (sta_ctrl->stainfo_buf & MEM_ALIGNMENT_PADDING)
sta_ctrl->stainfo_buf += MEM_ALIGNMENT_OFFSET -
(sta_ctrl->stainfo_buf & MEM_ALIGNMENT_PADDING);
#endif
fsta_queue = &sta_ctrl->free_sta_queue;
pq_init(drv, fsta_queue);
psta = (struct rtw_phl_stainfo_t *)(sta_ctrl->stainfo_buf);
for (i = 0; i < PHL_MAX_STA_NUM; i++) {
if (_phl_stainfo_init(phl_info, psta) != RTW_PHL_STATUS_SUCCESS) {
sta_init_fail = true;
break;
}
phl_stainfo_enqueue(phl_info, fsta_queue, psta);
psta++;
}
if (sta_init_fail == true) {
PHL_ERR("sta_init failed\n");
phl_stainfo_ctrl_deinie(phl_info);
goto _exit;
}
PHL_DUMP_STACTRL_EX(phl_info);
pstatus = RTW_PHL_STATUS_SUCCESS;
_exit:
FUNCOUT();
return pstatus;
}
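/*
 * phl_stainfo_ctrl_init() carves PHL_MAX_STA_NUM station entries out of
 * a single allocation and threads them onto free_sta_queue; when
 * MEM_ALIGNMENT is defined the buffer start is rounded up so the first
 * entry is aligned. Rough sizing of that allocation (as coded above):
 *
 *	total = sizeof(struct rtw_phl_stainfo_t) * PHL_MAX_STA_NUM
 *		(+ MEM_ALIGNMENT_OFFSET when MEM_ALIGNMENT is defined)
 */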
/*********** phl stainfo section ***********/
#ifdef DBG_PHL_STAINFO
void
phl_dump_stactrl(const char *caller, const int line, bool show_caller,
struct phl_info_t *phl_info)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
u8 ridx = MAX_WIFI_ROLE_NUMBER;
struct rtw_wifi_role_t *role;
struct stainfo_ctl_t *sta_ctrl = NULL;
sta_ctrl = phl_to_sta_ctrl(phl_info);
if (show_caller)
PHL_INFO("[PSTA] ###### FUN - %s LINE - %d #######\n", caller, line);
PHL_INFO("[PSTA] PHL_MAX_STA_NUM:%d\n", PHL_MAX_STA_NUM);
PHL_INFO("[PSTA] sta_ctrl - q_cnt :%d\n", sta_ctrl->free_sta_queue.cnt);
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
role = &(phl_com->wifi_roles[ridx]);
PHL_INFO("[PSTA] wrole_%d asoc_q cnt :%d\n",
ridx, role->assoc_sta_queue.cnt);
}
if (show_caller)
PHL_INFO("#################################\n");
}
static void _phl_dump_stainfo(struct rtw_phl_stainfo_t *phl_sta)
{
PHL_INFO("\t[STA] MAC-ID:%d, AID:%d, MAC-ADDR:%02x-%02x-%02x-%02x-%02x-%02x, Active:%s\n",
phl_sta->macid, phl_sta->aid,
phl_sta->mac_addr[0],phl_sta->mac_addr[1],phl_sta->mac_addr[2],
phl_sta->mac_addr[3],phl_sta->mac_addr[4],phl_sta->mac_addr[5],
(phl_sta->active) ? "Y" : "N");
PHL_INFO("\t[STA] WROLE-IDX:%d wlan_mode:0x%02x\n", phl_sta->wrole->id, phl_sta->wmode);
PHL_DUMP_CHAN_DEF(&phl_sta->chandef);
/****** statistic ******/
PHL_INFO("\t[STA] TP -[Tx:%d Rx :%d BI:N/A] (KBits)\n",
phl_sta->stats.tx_tp_kbits, phl_sta->stats.rx_tp_kbits);
PHL_INFO("\t[STA] Total -[Tx:%llu Rx :%llu BI:N/A] (Bytes)\n",
phl_sta->stats.tx_byte_total, phl_sta->stats.rx_byte_total);
/****** asoc_cap ******/
/****** protect ******/
/****** sec_mode ******/
/****** rssi_stat ******/
PHL_INFO("\t\t[HAL STA] rssi:%d assoc_rssi:%d, ofdm:%d, cck:%d, rssi_ma:%d, ma_rssi:%d\n",
(phl_sta->hal_sta->rssi_stat.rssi >> 1), phl_sta->hal_sta->rssi_stat.assoc_rssi,
(phl_sta->hal_sta->rssi_stat.rssi_ofdm >> 1), (phl_sta->hal_sta->rssi_stat.rssi_cck >> 1),
(phl_sta->hal_sta->rssi_stat.rssi_ma >> 5), phl_sta->hal_sta->rssi_stat.ma_rssi);
/****** ra_info ******/
PHL_INFO("\t\t[HAL STA] - RA info\n");
PHL_INFO("\t\t[HAL STA] Tx rate:0x%04x ra_bw_mode:%d, curr_tx_bw:%d\n",
phl_sta->hal_sta->ra_info.curr_tx_rate,
phl_sta->hal_sta->ra_info.ra_bw_mode,
phl_sta->hal_sta->ra_info.curr_tx_bw);
PHL_INFO("\t\t[HAL STA] dis_ra:%s ra_registered:%s\n",
(phl_sta->hal_sta->ra_info.dis_ra) ? "Y" : "N",
(phl_sta->hal_sta->ra_info.ra_registered) ? "Y" : "N");
PHL_INFO("\t\t[HAL STA] ra_mask:0x%08llx cur_ra_mask:0x%08llx, retry_ratio:%d\n",
phl_sta->hal_sta->ra_info.ra_mask,
phl_sta->hal_sta->ra_info.cur_ra_mask,
phl_sta->hal_sta->ra_info.curr_retry_ratio);
/****** ra_info - Report ******/
PHL_INFO("\t\t[HAL STA] RA Report: gi_ltf:%d rate_mode:%d, bw:%d, mcs_ss_idx:%d\n",
phl_sta->hal_sta->ra_info.rpt_rt_i.gi_ltf,
phl_sta->hal_sta->ra_info.rpt_rt_i.mode,
phl_sta->hal_sta->ra_info.rpt_rt_i.bw,
phl_sta->hal_sta->ra_info.rpt_rt_i.mcs_ss_idx);
PHL_INFO("\t\t[HAL STA] HAL rx_ok_cnt:%d rx_err_cnt:%d, rx_rate_plurality:%d\n\n",
phl_sta->hal_sta->trx_stat.rx_ok_cnt,
phl_sta->hal_sta->trx_stat.rx_err_cnt,
phl_sta->hal_sta->trx_stat.rx_rate_plurality);
}
void phl_dump_stainfo_all(const char *caller, const int line, bool show_caller,
struct phl_info_t *phl_info)
{
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
struct rtw_phl_stainfo_t *phl_sta = NULL;
u16 max_macid_num = 0;
u16 mid = 0;
if (show_caller)
PHL_INFO("###### FUN - %s LINE - %d #######\n", caller, line);
max_macid_num = macid_ctl->max_num;
PHL_INFO("max_macid_num:%d\n", max_macid_num);
_os_spinlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
for(mid = 0; mid < max_macid_num; mid++) {
if (_phl_macid_is_used(macid_ctl->used_map, mid)) {
phl_sta = macid_ctl->sta[mid];
if (phl_sta)
_phl_dump_stainfo(phl_sta);
}
}
_os_spinunlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
if (show_caller)
PHL_INFO("#################################\n");
}
const char *const _rtype_str[] = {
"NONE",
"STA",
"AP",
"VAP",
"ADHOC",
"MASTER",
"MESH",
"MONITOR",
"PD",
"GC",
"GO",
"TDLS",
"NAN",
"NONE"
};
void phl_dump_stainfo_per_role(const char *caller, const int line, bool show_caller,
struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole)
{
void *drv = phl_to_drvpriv(phl_info);
struct rtw_phl_stainfo_t *sta = NULL;
int sta_cnt = 0;
if (show_caller)
PHL_INFO("[STA] ###### FUN - %s LINE - %d #######\n", caller, line);
PHL_INFO("WR_IDX:%d RTYPE:%s, mac-addr:%02x-%02x-%02x-%02x-%02x-%02x\n",
wrole->id,
_rtype_str[wrole->type],
wrole->mac_addr[0], wrole->mac_addr[1], wrole->mac_addr[2],
wrole->mac_addr[3], wrole->mac_addr[4], wrole->mac_addr[5]);
_os_spinlock(drv, &wrole->assoc_sta_queue.lock, _bh, NULL);
if (wrole->type == PHL_RTYPE_STATION && wrole->mstate == MLME_LINKED)
sta_cnt = 1;
else if (wrole->type == PHL_RTYPE_TDLS)
sta_cnt = wrole->assoc_sta_queue.cnt;
else
sta_cnt = wrole->assoc_sta_queue.cnt - 1;
PHL_INFO("assoced STA num: %d\n", sta_cnt);
phl_list_for_loop(sta, struct rtw_phl_stainfo_t, &wrole->assoc_sta_queue.queue, list) {
if (sta)
_phl_dump_stainfo(sta);
}
_os_spinunlock(drv, &wrole->assoc_sta_queue.lock, _bh, NULL);
if (show_caller)
PHL_INFO("#################################\n");
}
void rtw_phl_sta_dump_info(void *phl, bool show_caller, struct rtw_wifi_role_t *wr, u8 mode)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
if (mode == 1) {
if (show_caller) {
PHL_DUMP_STACTRL_EX(phl_info);
} else {
PHL_DUMP_STACTRL(phl_info);
}
} else if (mode == 2) {
if (show_caller) {
PHL_DUMP_STAINFO_EX(phl_info);
} else {
PHL_DUMP_STAINFO(phl_info);
}
} else if (mode == 3) {
if (show_caller) {
PHL_DUMP_ROLE_STAINFO_EX(phl_info, wr);
} else {
PHL_DUMP_ROLE_STAINFO(phl_info, wr);
}
} else {
if (show_caller) {
PHL_DUMP_STACTRL_EX(phl_info);
PHL_DUMP_STAINFO_EX(phl_info);
PHL_DUMP_ROLE_STAINFO_EX(phl_info, wr);
}
else {
PHL_DUMP_STACTRL(phl_info);
PHL_DUMP_STAINFO(phl_info);
PHL_DUMP_ROLE_STAINFO(phl_info, wr);
}
}
}
#endif /*DBG_PHL_STAINFO*/
static bool _phl_self_stainfo_chk(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole, struct rtw_phl_stainfo_t *sta)
{
void *drv = phl_to_drvpriv(phl_info);
bool is_self = false;
switch (wrole->type) {
case PHL_RTYPE_STATION:
case PHL_RTYPE_P2P_GC:
_os_mem_cpy(drv, sta->mac_addr, wrole->mac_addr, MAC_ALEN);
is_self = true;
break;
case PHL_RTYPE_AP:
case PHL_RTYPE_MESH:
case PHL_RTYPE_P2P_GO:
case PHL_RTYPE_TDLS:
if (_os_mem_cmp(drv, wrole->mac_addr, sta->mac_addr, MAC_ALEN) == 0)
is_self = true;
break;
case PHL_RTYPE_NONE:
case PHL_RTYPE_VAP:
case PHL_RTYPE_ADHOC:
case PHL_RTYPE_ADHOC_MASTER:
case PHL_RTYPE_MONITOR:
case PHL_RTYPE_P2P_DEVICE:
case PHL_RTYPE_NAN:
case PHL_MLME_MAX:
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "_phl_self_stainfo_chk(): Unsupported case:%d, please check it\n",
wrole->type);
break;
default:
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "_phl_self_stainfo_chk(): role-type(%d) not recognize\n",
wrole->type);
break;
}
return is_self;
}
enum rtw_phl_status
phl_free_stainfo_sw(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
if(sta == NULL) {
PHL_ERR("%s sta is NULL\n", __func__);
return RTW_PHL_STATUS_FAILURE;
}
phl_free_rx_reorder(phl_info, sta);
pstatus = phl_deregister_tx_ring((void *)phl_info, sta->macid);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("macid(%d) phl_deregister_tx_ring failed\n", sta->macid);
}
/* release macid for used_map */
pstatus = _phl_release_macid(phl_info, sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
PHL_ERR("_phl_release_macid failed\n");
return pstatus;
}
enum rtw_phl_status
__phl_free_stainfo_sw(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct stainfo_ctl_t *sta_ctrl = phl_to_sta_ctrl(phl_info);
struct rtw_wifi_role_t *wrole = NULL;
FUNCIN();
if(sta == NULL) {
PHL_ERR("%s sta is NULL\n", __func__);
goto _exit;
}
wrole = sta->wrole;
if (!is_broadcast_mac_addr(sta->mac_addr)) {
if (_phl_self_stainfo_chk(phl_info, wrole, sta) == true) {
pstatus = RTW_PHL_STATUS_SUCCESS;
goto _exit;
}
}
pstatus = phl_stainfo_queue_del(phl_info, &wrole->assoc_sta_queue, sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("phl_stainfo_queue_del failed\n");
}
pstatus = phl_free_stainfo_sw(phl_info, sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("macid(%d) _phl_free_stainfo_sw failed\n", sta->macid);
}
pstatus = phl_stainfo_enqueue(phl_info, &sta_ctrl->free_sta_queue, sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
PHL_ERR("phl_stainfo_enqueue to free queue failed\n");
#ifdef RTW_WKARD_AP_CLIENT_ADD_DEL_NTY
if ((wrole->type == PHL_RTYPE_AP) ||
(wrole->type == PHL_RTYPE_VAP) ||
(wrole->type == PHL_RTYPE_MESH) ||
(wrole->type == PHL_RTYPE_P2P_GO))
phl_role_ap_client_notify(phl_info, wrole, MLME_NO_LINK);
#endif
_exit:
PHL_DUMP_STACTRL_EX(phl_info);
FUNCOUT();
return pstatus;
}
enum rtw_phl_status
rtw_phl_free_stainfo_sw(void *phl, struct rtw_phl_stainfo_t *sta)
{
return __phl_free_stainfo_sw((struct phl_info_t *)phl, sta);
}
enum rtw_phl_status
phl_free_stainfo_hw(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
if (sta == NULL) {
PHL_ERR("%s sta == NULL\n", __func__);
goto _exit;
}
sta->active = false;
if (rtw_hal_del_sta_entry(phl_info->hal, sta) == RTW_HAL_STATUS_SUCCESS)
pstatus = RTW_PHL_STATUS_SUCCESS;
else
PHL_ERR("rtw_hal_del_sta_entry failed\n");
_exit:
return pstatus;
}
enum rtw_phl_status
__phl_free_stainfo_hw(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta)
{
struct rtw_wifi_role_t *wrole = sta->wrole;
if (!is_broadcast_mac_addr(sta->mac_addr)) {
if (_phl_self_stainfo_chk(phl_info, wrole, sta) == true)
return RTW_PHL_STATUS_SUCCESS;
}
return phl_free_stainfo_hw(phl_info, sta);
}
static enum rtw_phl_status
__phl_free_stainfo(struct phl_info_t *phl, struct rtw_phl_stainfo_t *sta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
pstatus = __phl_free_stainfo_hw(phl, sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
PHL_ERR("__phl_free_stainfo_hw failed\n");
pstatus = __phl_free_stainfo_sw(phl, sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
PHL_ERR("__phl_free_stainfo_sw failed\n");
return pstatus;
}
static enum rtw_phl_status
_phl_alloc_stainfo_sw(struct phl_info_t *phl_info,struct rtw_phl_stainfo_t *sta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
pstatus = _phl_alloc_macid(phl_info, sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s allocate macid failure!\n", __func__);
goto error_alloc_macid;
}
if (phl_register_tx_ring(phl_info, sta->macid,
sta->wrole->hw_band, sta->wrole->hw_wmm, sta->wrole->hw_port) !=
RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s register_tx_ring failure!\n", __func__);
goto error_register_tx_ring;
}
pstatus = RTW_PHL_STATUS_SUCCESS;
return pstatus;
error_register_tx_ring:
_phl_release_macid(phl_info, sta);
error_alloc_macid:
return pstatus;
}
static void _phl_sta_set_default_value(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *phl_sta)
{
phl_sta->bcn_hit_cond = 0; /* beacon:A3 probersp: A1 & A3 */
/* fit rule
* 0: A1 & A2
* 1: A1 & A3
*
* Rule 0 should be used for both AP and STA modes.
*
* For STA, A3 is source address(SA) which can be any peer on the LAN.
*
* For AP, A3 is destination address(DA) which can also be any node
* on the LAN. A1 & A2 match find the address CAM entry that contains the
* correct security CAM ID and MAC ID.
*/
phl_sta->hit_rule = 0;
}
struct rtw_phl_stainfo_t *
phl_alloc_stainfo_sw(struct phl_info_t *phl_info,
u8 *sta_addr,
struct rtw_wifi_role_t *wrole)
{
struct stainfo_ctl_t *sta_ctrl = phl_to_sta_ctrl(phl_info);
struct rtw_phl_stainfo_t *phl_sta = NULL;
void *drv = phl_to_drvpriv(phl_info);
bool bmc_sta = false;
FUNCIN();
if (is_broadcast_mac_addr(sta_addr))
bmc_sta = true;
/* if sta_addr is bmc addr, allocate new sta_info */
if (wrole->type == PHL_RTYPE_STATION && !bmc_sta) {
phl_sta = rtw_phl_get_stainfo_self(phl_info, wrole);
if (phl_sta) {
_os_mem_cpy(drv, phl_sta->mac_addr, sta_addr, MAC_ALEN);
goto _exit;
}
}
/* check station info exist */
phl_sta = rtw_phl_get_stainfo_by_addr(phl_info, wrole, sta_addr);
if (phl_sta) {
PHL_INFO("%s phl_sta(%02x:%02x:%02x:%02x:%02x:%02x) exist\n",
__func__, sta_addr[0], sta_addr[1], sta_addr[2],
sta_addr[3], sta_addr[4], sta_addr[5]);
goto _exit;
}
phl_sta = phl_stainfo_dequeue(phl_info, &sta_ctrl->free_sta_queue);
if (phl_sta == NULL) {
PHL_ERR("allocate phl_sta failure!\n");
goto _exit;
}
_os_mem_cpy(drv, phl_sta->mac_addr, sta_addr, MAC_ALEN);
phl_sta->wrole = wrole;
if (_phl_alloc_stainfo_sw(phl_info, phl_sta) != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("_phl_alloc_stainfo_sw failed\n");
goto error_alloc_sta;
}
_phl_sta_set_default_value(phl_info, phl_sta);
phl_stainfo_enqueue(phl_info, &wrole->assoc_sta_queue, phl_sta);
#ifdef RTW_WKARD_AP_CLIENT_ADD_DEL_NTY
if (_phl_self_stainfo_chk(phl_info, wrole, phl_sta) == false) {
if ((wrole->type == PHL_RTYPE_AP) ||
(wrole->type == PHL_RTYPE_VAP) ||
(wrole->type == PHL_RTYPE_MESH) ||
(wrole->type == PHL_RTYPE_P2P_GO)) {
phl_role_ap_client_notify(phl_info, wrole, MLME_LINKING);
}
}
#endif
_exit:
PHL_DUMP_STACTRL_EX(phl_info);
FUNCOUT();
return phl_sta;
error_alloc_sta:
phl_stainfo_enqueue(phl_info, &sta_ctrl->free_sta_queue, phl_sta);
phl_sta = NULL;
PHL_DUMP_STACTRL_EX(phl_info);
FUNCOUT();
return phl_sta;
}
struct rtw_phl_stainfo_t *
rtw_phl_alloc_stainfo_sw(void *phl, u8 *sta_addr,
struct rtw_wifi_role_t *wrole)
{
return phl_alloc_stainfo_sw((struct phl_info_t *)phl, sta_addr, wrole);
}
enum rtw_phl_status
phl_alloc_stainfo_hw(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
if (sta == NULL) {
PHL_ERR("%s sta == NULL\n", __func__);
goto _exit;
}
if (rtw_hal_add_sta_entry(phl_info->hal, sta) != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("%s rtw_hal_add_sta_entry failure!\n", __func__);
} else {
sta->active = true;
pstatus = RTW_PHL_STATUS_SUCCESS;
}
_exit:
return pstatus;
}
enum rtw_phl_status
__phl_alloc_stainfo_hw(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta)
{
return phl_alloc_stainfo_hw(phl_info, sta);
}
static enum rtw_phl_status
__phl_alloc_stainfo(struct phl_info_t *phl,
struct rtw_phl_stainfo_t **sta,
u8 *sta_addr,
struct rtw_wifi_role_t *wrole)
{
struct rtw_phl_stainfo_t *alloc_sta = NULL;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
alloc_sta = phl_alloc_stainfo_sw(phl, sta_addr, wrole);
if (alloc_sta == NULL) {
PHL_ERR("%s can't alloc stainfo\n", __func__);
*sta = alloc_sta;
goto _exit;
}
if (alloc_sta->active == false) {
pstatus = __phl_alloc_stainfo_hw(phl, alloc_sta);
if (pstatus != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("__phl_alloc_stainfo_hw failed\n");
goto _err_alloc_sta_hw;
}
}
PHL_INFO("%s success - macid:%u %02x:%02x:%02x:%02x:%02x:%02x\n",
__func__, alloc_sta->macid,
alloc_sta->mac_addr[0], alloc_sta->mac_addr[1], alloc_sta->mac_addr[2],
alloc_sta->mac_addr[3], alloc_sta->mac_addr[4], alloc_sta->mac_addr[5]);
*sta = alloc_sta;
return RTW_PHL_STATUS_SUCCESS;
_err_alloc_sta_hw:
__phl_free_stainfo_sw(phl, alloc_sta);
*sta = alloc_sta = NULL;
_exit:
return RTW_PHL_STATUS_FAILURE;
}
static enum rtw_phl_status
_phl_alloc_stainfo(struct phl_info_t *phl,
struct rtw_phl_stainfo_t **sta,
u8 *sta_addr,
struct rtw_wifi_role_t *wrole,
bool alloc,
bool only_hw)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
if (alloc) {
if (only_hw)
pstatus = __phl_alloc_stainfo_hw(phl, *sta);
else
pstatus = __phl_alloc_stainfo(phl, sta, sta_addr, wrole);
} else {
if (only_hw)
pstatus = __phl_free_stainfo_hw(phl, *sta);
else
pstatus = __phl_free_stainfo(phl, *sta);
}
return pstatus;
}
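/*
 * _phl_alloc_stainfo() multiplexes four operations through two flags:
 *
 *	alloc=true,  only_hw=false : allocate SW entry, then program HW entry
 *	alloc=true,  only_hw=true  : program HW entry for an existing *sta
 *	alloc=false, only_hw=false : free HW entry, then free SW entry
 *	alloc=false, only_hw=true  : free HW entry only
 */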
#ifdef CONFIG_CMD_DISP
struct cmd_stainfo_param {
struct rtw_phl_stainfo_t **sta;
u8 sta_addr[MAC_ALEN];
struct rtw_wifi_role_t *wrole;
bool alloc;
bool only_hw;
};
static void
_phl_cmd_alloc_stainfo_done(void *drv_priv,
u8 *cmd,
u32 cmd_len,
enum rtw_phl_status status)
{
if (cmd)
_os_kmem_free(drv_priv, cmd, cmd_len);
}
static enum rtw_phl_status
_phl_cmd_alloc_stainfo(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t **sta,
u8 *sta_addr,
struct rtw_wifi_role_t *wrole,
bool alloc, bool only_hw,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
void *drv = phl_to_drvpriv(phl_info);
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct cmd_stainfo_param *param = NULL;
u32 param_len = 0;
if (cmd_type == PHL_CMD_DIRECTLY) {
psts = _phl_alloc_stainfo(phl_info, sta, sta_addr, wrole, alloc, only_hw);
goto _exit;
}
param_len = sizeof(struct cmd_stainfo_param);
param = _os_kmem_alloc(drv, param_len);
if (param == NULL) {
PHL_ERR("%s: alloc param failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto _exit;
}
_os_mem_set(drv, param, 0, param_len);
param->sta = sta;
_os_mem_cpy(drv, param->sta_addr, sta_addr, MAC_ALEN);
param->wrole = wrole;
param->alloc = alloc;
param->only_hw = only_hw;
psts = phl_cmd_enqueue(phl_info,
wrole->hw_band,
MSG_EVT_STA_INFO_CTRL,
(u8 *)param,
param_len,
_phl_cmd_alloc_stainfo_done,
cmd_type,
cmd_timeout);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
psts = RTW_PHL_STATUS_FAILURE;
_os_kmem_free(drv, param, param_len);
}
_exit:
return psts;
}
enum rtw_phl_status
phl_cmd_alloc_stainfo_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct cmd_stainfo_param *cmd_sta_param = (struct cmd_stainfo_param *)param;
return _phl_alloc_stainfo(phl_info,
cmd_sta_param->sta,
cmd_sta_param->sta_addr,
cmd_sta_param->wrole,
cmd_sta_param->alloc,
cmd_sta_param->only_hw);
}
#endif /* CONFIG_CMD_DISP */
enum rtw_phl_status
rtw_phl_cmd_alloc_stainfo(void *phl,
struct rtw_phl_stainfo_t **sta,
u8 *sta_addr,
struct rtw_wifi_role_t *wrole,
bool alloc, bool only_hw,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
#ifdef CONFIG_CMD_DISP
return _phl_cmd_alloc_stainfo(phl, sta, sta_addr, wrole, alloc, only_hw, cmd_type, cmd_timeout);
#else
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s: not support alloc stainfo cmd\n",
__func__);
return _phl_alloc_stainfo((struct phl_info_t *)phl, sta, sta_addr, wrole, alloc, only_hw);
#endif /* CONFIG_CMD_DISP */
}
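/*
 * Typical core-layer call to create a station entry through this API
 * (illustrative; peer_addr and wrole come from the caller, and
 * PHL_CMD_DIRECTLY runs the handler in the caller's context as coded
 * above):
 *
 *	struct rtw_phl_stainfo_t *sta = NULL;
 *	rtw_phl_cmd_alloc_stainfo(phl, &sta, peer_addr, wrole,
 *				  true, false,
 *				  PHL_CMD_DIRECTLY, 0);
 */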
enum rtw_phl_status
phl_wifi_role_free_stainfo_hw(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole)
{
struct macid_ctl_t *mc = phl_to_mac_ctrl(phl_info);
u16 max_macid_num = mc->max_num;
struct rtw_phl_stainfo_t *sta = NULL;
u32 *used_map;
u16 mid;
used_map = &mc->wifi_role_usedmap[wrole->id][0];
for(mid = 0; mid < max_macid_num; mid++) {
if (_phl_macid_is_used(used_map, mid)) {
sta = mc->sta[mid];
if (sta) {
PHL_INFO("%s [WR-%d] free sta_info(MID:%d)\n",
__func__, wrole->id, sta->macid);
phl_free_stainfo_hw(phl_info, sta);
}
}
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_wifi_role_free_stainfo_sw(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *role)
{
struct rtw_phl_stainfo_t *phl_sta = NULL;
struct stainfo_ctl_t *sta_ctrl = phl_to_sta_ctrl(phl_info);
PHL_DUMP_STACTRL_EX(phl_info);
do {
phl_sta = phl_stainfo_dequeue(phl_info, &role->assoc_sta_queue);
if (phl_sta) {
phl_free_stainfo_sw(phl_info, phl_sta);
phl_stainfo_enqueue(phl_info,
&sta_ctrl->free_sta_queue, phl_sta);
}
} while(phl_sta != NULL);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_wifi_role_free_stainfo(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *role)
{
struct rtw_phl_stainfo_t *phl_sta = NULL;
struct stainfo_ctl_t *sta_ctrl = phl_to_sta_ctrl(phl_info);
PHL_DUMP_STACTRL_EX(phl_info);
do {
phl_sta = phl_stainfo_dequeue(phl_info, &role->assoc_sta_queue);
if (phl_sta) {
phl_free_stainfo_hw(phl_info, phl_sta);
phl_free_stainfo_sw(phl_info, phl_sta);
phl_stainfo_enqueue(phl_info,
&sta_ctrl->free_sta_queue,
phl_sta);
}
} while(phl_sta != NULL);
return RTW_PHL_STATUS_SUCCESS;
}
static void
_phl_media_sta_notify(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta, bool is_connect)
{
if (is_connect)
phl_pkt_ofld_add_entry(phl_info, sta->macid);
else
phl_pkt_ofld_del_entry(phl_info, sta->macid);
}
/**
* According to 802.11 spec 26.5.2.3.2
* We shall not transmit HE TB PPDU with RU-26 on DFS channel
*/
static void
_phl_set_dfs_tb_ctrl(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole)
{
struct rtw_regulation_channel reg_ch = {0};
enum band_type band = wrole->chandef.band;
u8 channel = wrole->chandef.chan;
bool is_dfs = false;
if (rtw_phl_regulation_query_ch(phl_info, band, channel, ®_ch)) {
if (reg_ch.property & CH_DFS)
is_dfs = true;
rtw_hal_set_dfs_tb_ctrl(phl_info->hal, is_dfs);
}
}
static void
_phl_no_link_reset_sta_info(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta)
{
void *drv = phl_to_drvpriv(phl_info);
/* asoc cap */
_os_mem_set(drv, &sta->asoc_cap, 0, sizeof(struct protocol_cap_t));
/* other capabilities under stainfo need to reset with default value */
sta->tf_trs = 0;
/* protection mode */
sta->protect = RTW_PROTECT_DISABLE;
}
/**
* This function is called once station associated with AP
* or incoming station got associated under AP mode.
* Before calling this function, update address / net_type / ...
* information of stainfo
* It will configure some hw register, ex
* address cam
* @phl: see phl_info_t
* @stainfo: information is updated through phl_station_info
*/
static enum rtw_phl_status
phl_update_media_status(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
u8 *sta_addr, bool is_connect)
{
struct rtw_wifi_role_t *wrole = sta->wrole;
void *drv = phl_to_drvpriv(phl_info);
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
bool is_sta_linked = false;
is_sta_linked = rtw_hal_is_sta_linked(phl_info->hal, sta);
if (is_connect == true && is_sta_linked == true) {
PHL_ERR("%s STA (MAC_ID:%d) had connected\n", __func__, sta->macid);
goto _exit;
}
if (is_connect == false && is_sta_linked == false) {
/* handle connect abort case */
if (wrole->mstate == MLME_LINKING) {
PHL_INFO("%s MAC_ID(%d) connect abort\n", __func__, sta->macid);
pstatus = RTW_PHL_STATUS_SUCCESS;
} else {
PHL_ERR("%s MAC_ID(%d) had disconnected\n", __func__, sta->macid);
}
if (wrole->type == PHL_RTYPE_STATION || wrole->type == PHL_RTYPE_P2P_GC)
wrole->mstate = MLME_NO_LINK;
goto _exit;
}
/* reset trx statistics */
if (is_connect == false) {
phl_reset_tx_stats(&sta->stats);
phl_reset_rx_stats(&sta->stats);
_phl_no_link_reset_sta_info(phl_info, sta);
CLEAR_STATUS_FLAG(wrole->status, WR_STATUS_TSF_SYNC);
} else {
phl_clean_sta_bcn_info(phl_info, sta);
}
/* Configure address cam, including net_type and sync_tsf */
if ((wrole->type == PHL_RTYPE_STATION) || (wrole->type == PHL_RTYPE_P2P_GC)
#ifdef CONFIG_PHL_TDLS
/* STA disconnects with the associated AP before tearing down with TDLS peers */
|| ((wrole->type == PHL_RTYPE_TDLS) && (!sta_addr))
#endif
) {
if (is_connect) {
wrole->mstate = MLME_LINKED;
_os_mem_cpy(drv, sta->mac_addr, sta_addr, MAC_ALEN);
_phl_set_dfs_tb_ctrl(phl_info, wrole);
} else {
wrole->mstate = MLME_NO_LINK;
}
}
#ifdef RTW_WKARD_AP_CLIENT_ADD_DEL_NTY
else if ((wrole->type == PHL_RTYPE_AP) ||
(wrole->type == PHL_RTYPE_VAP) ||
(wrole->type == PHL_RTYPE_MESH) ||
(wrole->type == PHL_RTYPE_P2P_GO)) {
if (is_connect)
phl_role_ap_client_notify(phl_info, wrole, MLME_LINKED);
}
#endif
hstatus = rtw_hal_update_sta_entry(phl_info->hal, sta, is_connect);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("rtw_hal_update_sta_entry failure!\n");
goto _exit;
}
if (wrole->type == PHL_RTYPE_STATION
#ifdef CONFIG_PHL_TDLS
/* STA disconnects with the associated AP before tearing down with TDLS peers */
|| ((wrole->type == PHL_RTYPE_TDLS) && (!sta_addr))
#endif
) {
hstatus = rtw_hal_role_cfg(phl_info->hal, wrole);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("rtw_hal_role_cfg failure!\n");
goto _exit;
}
}
_phl_media_sta_notify(phl_info, sta, is_connect);
pstatus = RTW_PHL_STATUS_SUCCESS;
/* TODO: Configure RCR */
_exit:
return pstatus;
}
#ifdef CONFIG_CMD_DISP
struct sta_media_param {
struct rtw_phl_stainfo_t *sta;
u8 sta_addr[MAC_ALEN];
bool is_connect;
};
enum rtw_phl_status
phl_update_media_status_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct sta_media_param *media_sts = (struct sta_media_param *)param;
return phl_update_media_status(phl_info,
media_sts->sta, media_sts->sta_addr, media_sts->is_connect);
}
void phl_update_media_status_done(void *drv_priv, u8 *cmd, u32 cmd_len,
enum rtw_phl_status status)
{
if (cmd) {
_os_kmem_free(drv_priv, cmd, cmd_len);
cmd = NULL;
}
}
#endif
enum rtw_phl_status
rtw_phl_cmd_update_media_status(void *phl,
struct rtw_phl_stainfo_t *sta,
u8 *sta_addr,
bool is_connect,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv = phl_to_drvpriv(phl_info);
struct rtw_wifi_role_t *wrole = NULL;
struct sta_media_param *sta_ms = NULL;
u32 sta_ms_len = 0;
if (cmd_type == PHL_CMD_DIRECTLY) {
psts = phl_update_media_status(phl_info, sta, sta_addr, is_connect);
goto _exit;
}
sta_ms_len = sizeof(struct sta_media_param);
sta_ms = _os_kmem_alloc(drv, sta_ms_len);
if (sta_ms == NULL) {
PHL_ERR("%s: alloc sta media status param failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto _exit;
}
_os_mem_set(drv, sta_ms, 0, sta_ms_len);
sta_ms->sta = sta;
sta_ms->is_connect = is_connect;
if (is_connect && sta_addr)
_os_mem_cpy(drv, sta_ms->sta_addr, sta_addr, MAC_ALEN);
wrole = sta->wrole;
psts = phl_cmd_enqueue(phl_info,
wrole->hw_band,
MSG_EVT_STA_MEDIA_STATUS_UPT,
(u8*)sta_ms,
sta_ms_len,
phl_update_media_status_done,
cmd_type,
cmd_timeout);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
psts = RTW_PHL_STATUS_FAILURE;
_os_kmem_free(drv, sta_ms, sta_ms_len);
}
_exit:
return psts;
#else
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s: not support cmd to update media status\n",
__func__);
return phl_update_media_status((struct phl_info_t *)phl, sta, sta_addr, is_connect);
#endif
}
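/*
 * On a successful association the core layer usually allocates the
 * station entry first and then flips its media status, e.g.
 * (illustrative; ap_addr is the BSSID of the associated AP):
 *
 *	rtw_phl_cmd_alloc_stainfo(phl, &sta, ap_addr, wrole,
 *				  true, false, PHL_CMD_DIRECTLY, 0);
 *	// ... fill sta->asoc_cap / wmode from the assoc response ...
 *	rtw_phl_cmd_update_media_status(phl, sta, ap_addr, true,
 *					PHL_CMD_DIRECTLY, 0);
 */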
/**
* This function is called once station info changed
* (BW/NSS/RAMASK/SEC/ROLE/MACADDR........)
* @phl: see phl_info_t
* @stainfo: information is updated through phl_station_info
* @mode: see phl_upd_mode
*/
enum rtw_phl_status
phl_change_stainfo(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
enum phl_upd_mode mode)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
hstatus = rtw_hal_change_sta_entry(phl_info->hal, sta, mode);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("rtw_hal_change_sta_entry failure!\n");
return RTW_PHL_STATUS_FAILURE;
}
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status
_change_stainfo(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta, enum sta_chg_id chg_id, u8 *chg_info, u8 chg_info_len)
{
enum phl_upd_mode mode = PHL_UPD_STA_INFO_CHANGE;
switch (chg_id) {
case STA_CHG_BW:
case STA_CHG_NSS:
case STA_CHG_RAMASK:
{
PHL_INFO("%s MACID:%d %02x:%02x:%02x:%02x:%02x:%02x update bw\n",
__func__, sta->macid,
sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5]);
}
break;
case STA_CHG_SEC_MODE:
sta->sec_mode = *((u8*)chg_info);
break;
case STA_CHG_MBSSID:
sta->addr_sel = 1;
sta->addr_msk = *((u8*)chg_info);
break;
case STA_CHG_RA_GILTF:
sta->hal_sta->ra_info.cal_giltf = *((u8*)chg_info);
PHL_INFO("%s: Config RA GI LTF = %d\n", __FUNCTION__, *((u8*)chg_info));
break;
case STA_CHG_MAX:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "rtw_phl_change_stainfo(): Unsupported case:%d, please check it\n",
chg_id);
break;
default:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "rtw_phl_change_stainfo(): Unrecognize case:%d, please check it\n",
chg_id);
break;
}
return phl_change_stainfo(phl_info, sta, mode);
}
#ifdef CONFIG_CMD_DISP
struct sta_chg_param {
struct rtw_phl_stainfo_t *sta;
enum sta_chg_id id;
u8 *info;
u8 info_len;
};
enum rtw_phl_status
phl_cmd_change_stainfo_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct sta_chg_param *sta_param = (struct sta_chg_param *)param;
return _change_stainfo(phl_info,
sta_param->sta, sta_param->id,
sta_param->info, sta_param->info_len);
}
static void
_phl_cmd_change_stainfo_done(void *drv_priv, u8 *cmd, u32 cmd_len,
enum rtw_phl_status status)
{
struct sta_chg_param *sta_chg_info = NULL;
if (cmd == NULL || cmd_len == 0) {
PHL_ERR("%s buf == NULL || buf_len == 0\n", __func__);
_os_warn_on(1);
return;
}
sta_chg_info = (struct sta_chg_param *)cmd;
PHL_INFO("%s - id:%d .....\n", __func__, sta_chg_info->id);
if (sta_chg_info->info && sta_chg_info->info_len > 0)
_os_kmem_free(drv_priv, sta_chg_info->info, sta_chg_info->info_len);
_os_kmem_free(drv_priv, cmd, cmd_len);
cmd = NULL;
}
static enum rtw_phl_status
_phl_cmd_change_stainfo(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta, enum sta_chg_id chg_id,
u8 *chg_info, u8 chg_info_len,
enum phl_cmd_type cmd_type, u32 cmd_timeout)
{
void *drv = phl_to_drvpriv(phl_info);
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct rtw_wifi_role_t *wrole = sta->wrole;
struct sta_chg_param *param = NULL;
u8 param_len = 0;
if (cmd_type == PHL_CMD_DIRECTLY) {
psts = _change_stainfo(phl_info, sta, chg_id, chg_info, chg_info_len);
goto _exit;
}
param_len = sizeof(struct sta_chg_param);
param = _os_kmem_alloc(drv, param_len);
if (param == NULL) {
PHL_ERR("%s: alloc param failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto _exit;
}
_os_mem_set(drv, param, 0, param_len);
param->sta = sta;
param->id = chg_id;
param->info_len = chg_info_len;
if (chg_info_len > 0) {
param->info = _os_kmem_alloc(drv, chg_info_len);
if (param->info == NULL) {
PHL_ERR("%s: alloc param->info failed!\n", __func__);
psts = RTW_PHL_STATUS_RESOURCE;
goto _err_info;
}
_os_mem_set(drv, param->info, 0, chg_info_len);
_os_mem_cpy(drv, param->info, chg_info, chg_info_len);
} else {
param->info = NULL;
}
psts = phl_cmd_enqueue(phl_info,
wrole->hw_band,
MSG_EVT_STA_CHG_STAINFO,
(u8 *)param,
param_len,
_phl_cmd_change_stainfo_done,
cmd_type,
cmd_timeout);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
psts = RTW_PHL_STATUS_FAILURE;
goto _err_cmd;
}
return psts;
_err_cmd:
if (param->info)
_os_kmem_free(drv, param->info, param->info_len);
_err_info:
if (param)
_os_kmem_free(drv, param, param_len);
_exit:
return psts;
}
#endif
enum rtw_phl_status
rtw_phl_cmd_change_stainfo(void *phl,
struct rtw_phl_stainfo_t *sta, enum sta_chg_id chg_id,
u8 *chg_info, u8 chg_info_len,
enum phl_cmd_type cmd_type, u32 cmd_timeout)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
#ifdef CONFIG_CMD_DISP
return _phl_cmd_change_stainfo(phl_info, sta, chg_id, chg_info, chg_info_len,
cmd_type, cmd_timeout);
#else
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s: not support alloc stainfo cmd\n",
__func__);
return _change_stainfo(phl_info, sta, chg_id, chg_info, chg_info_len);
#endif /* CONFIG_CMD_DISP */
}
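/*
 * Example of pushing a single-field change through the stainfo change
 * path (illustrative; the sec_mode value depends on the cipher in use):
 *
 *	u8 sec_mode = new_sec_mode;
 *	rtw_phl_cmd_change_stainfo(phl, sta, STA_CHG_SEC_MODE,
 *				   &sec_mode, sizeof(sec_mode),
 *				   PHL_CMD_DIRECTLY, 0);
 */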
/**
* This function updates tx/rx traffic status of each active station info
*/
void
phl_sta_trx_tfc_upd(struct phl_info_t *phl_info)
{
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
struct rtw_phl_stainfo_t *phl_sta = NULL;
struct rtw_stats *sta_stats = NULL;
u16 max_macid_num = 0;
u16 mid = 0;
max_macid_num = macid_ctl->max_num;
_os_spinlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
for(mid = 0; mid < max_macid_num; mid++) {
if (_phl_macid_is_used(macid_ctl->used_map, mid)) {
phl_sta = macid_ctl->sta[mid];
if (phl_sta) {
#ifdef CONFIG_PHL_RA_TXSTS_DBG
/* issue H2C to get ra txsts report */
rtw_phl_txsts_rpt_config(phl_info, phl_sta);
#endif
sta_stats = &phl_sta->stats;
phl_tx_traffic_upd(sta_stats);
phl_rx_traffic_upd(sta_stats);
}
}
}
_os_spinunlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
}
/**
* This function is used to get phl sta info
* by macid
* @phl: see phl_info_t
* @macid: macid
*/
struct rtw_phl_stainfo_t *
rtw_phl_get_stainfo_by_macid(void *phl, u16 macid)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
struct rtw_phl_stainfo_t *phl_sta = NULL;
if (macid >= macid_ctl->max_num) {
PHL_ERR("%s macid(%d) is invalid\n", __func__, macid);
return NULL;
}
_os_spinlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
if (_phl_macid_is_used(macid_ctl->used_map, macid))
phl_sta = macid_ctl->sta[macid];
if (phl_sta == NULL) {
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_,"%s sta info (macid:%d) is NULL\n", __func__, macid);
#ifdef CONFIG_PHL_USB_RELEASE_RPT_ENABLE
/* temporarily commented out: the release report may carry an unused macid */
/* and would trigger a call trace */
/* _os_warn_on(1); */
#else
_os_warn_on(1);
#endif /* CONFIG_PHL_USB_RELEASE_RPT_ENABLE */
}
_os_spinunlock(phl_to_drvpriv(phl_info), &macid_ctl->lock, _bh, NULL);
return phl_sta;
}
struct rtw_phl_stainfo_t *
rtw_phl_get_stainfo_by_addr_ex(void *phl, u8 *addr)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *mc = phl_to_mac_ctrl(phl_info);
struct rtw_phl_stainfo_t *sta = NULL;
u16 mid = 0;
u16 max_macid_num = mc->max_num;
bool sta_found = false;
_os_spinlock(phl_to_drvpriv(phl_info), &mc->lock, _bh, NULL);
for(mid = 0; mid < max_macid_num; mid++) {
if (_phl_macid_is_used(mc->used_map, mid)) {
sta = mc->sta[mid];
if (_os_mem_cmp(phl_to_drvpriv(phl_info),
sta->mac_addr, addr, MAC_ALEN) == 0) {
sta_found = true;
break;
}
}
}
_os_spinunlock(phl_to_drvpriv(phl_info), &mc->lock, _bh, NULL);
if (sta_found == false)
sta = NULL;
return sta;
}
u16 rtw_phl_get_macid_by_addr(void *phl, u8 *addr)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *mc = phl_to_mac_ctrl(phl_info);
struct rtw_phl_stainfo_t *sta = NULL;
sta = rtw_phl_get_stainfo_by_addr_ex(phl, addr);
if (sta)
return sta->macid;
return mc->max_num;
}
/**
 * This function is used to look up an existing phl_station_info and
 * returns a pointer to rtw_phl_stainfo_t (NULL if not found)
 * @phl: see phl_info_t
 * @wrole: wifi role the station belongs to
 * @addr: current mac address of this station
*/
struct rtw_phl_stainfo_t *
rtw_phl_get_stainfo_by_addr(void *phl, struct rtw_wifi_role_t *wrole, u8 *addr)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
struct rtw_phl_stainfo_t *sta = NULL;
if (is_broadcast_mac_addr(addr)) {
u16 macid = macid_ctl->wrole_bmc[wrole->id];
if (macid >= macid_ctl->max_num)
sta = NULL;
else
sta = macid_ctl->sta[macid];
goto _exit;
}
sta = phl_stainfo_queue_search(phl_info,
&wrole->assoc_sta_queue, addr);
_exit:
return sta;
}
struct rtw_phl_stainfo_t *
rtw_phl_get_stainfo_self(void *phl, struct rtw_wifi_role_t *wrole)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_stainfo_t *sta = NULL;
#if 0
if ((wrole->type == PHL_RTYPE_STATION) &&
(wrole->mstate == MLME_LINKED))
//????
else
sta = phl_stainfo_queue_search(phl_info,
&wrole->assoc_sta_queue, wrole->mac_addr);
}
#else
sta = phl_stainfo_queue_get_first(phl_info, &wrole->assoc_sta_queue);
if (sta == NULL)
PHL_ERR("%s sta == NULL\n", __func__);
#endif
return sta;
}
u8
rtw_phl_get_sta_rssi(struct rtw_phl_stainfo_t *sta)
{
u8 rssi = rtw_hal_get_sta_rssi(sta);
return rssi;
}
u8 phl_get_min_rssi_bcn(struct phl_info_t *phl_info)
{
struct macid_ctl_t *macid_ctl = phl_to_mac_ctrl(phl_info);
struct rtw_phl_stainfo_t *sta = NULL;
u8 rssi_bcn_min = 0xFF;
u16 i = 0;
u8 rssi = 0;
for (i = 0; i < macid_ctl->max_num; i++) {
if (!_phl_macid_is_used(macid_ctl->used_map, i))
continue;
sta = rtw_phl_get_stainfo_by_macid(phl_info, i);
if (NULL == sta)
continue;
rssi = rtw_hal_get_sta_rssi_bcn(sta);
PHL_DBG("%s macid(%d) with rssi_bcn = %d\n",
__func__, i, rssi);
if (rssi == 0)
continue;
rssi_bcn_min = MIN(rssi, rssi_bcn_min);
}
return rssi_bcn_min;
}
enum rtw_phl_status
rtw_phl_query_rainfo(void *phl, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_phl_rainfo *ra_info)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status phl_sts = RTW_PHL_STATUS_FAILURE;
do {
if (NULL == phl_sta) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
"%s : phl_sta is NULL\n",
__func__);
break;
}
if (NULL == ra_info) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
"%s : Input parameter is NULL\n",
__func__);
break;
}
if (RTW_HAL_STATUS_SUCCESS ==
rtw_hal_query_rainfo(phl_info->hal, phl_sta->hal_sta,
ra_info)) {
phl_sts = RTW_PHL_STATUS_SUCCESS;
break;
} else {
break;
}
} while (false);
return phl_sts;
}
/**
* rtw_phl_txsts_rpt_config() - issue h2c for txok and tx retry info
* @phl: struct phl_info_t *
 * @phl_sta: station indicating the first macid that you want to query.
 * Return: enum rtw_phl_status.
*/
enum rtw_phl_status
rtw_phl_txsts_rpt_config(void *phl, struct rtw_phl_stainfo_t *phl_sta)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status phl_sts = RTW_PHL_STATUS_FAILURE;
if (phl_sta) {
if (RTW_HAL_STATUS_SUCCESS == rtw_hal_query_txsts_rpt(phl_info->hal, phl_sta->macid))
phl_sts = RTW_PHL_STATUS_SUCCESS;
}
return phl_sts;
}
#ifdef CONFIG_USB_HCI
/**
* rtw_phl_get_tx_ok_rpt() - get txok info.
* @phl: struct phl_info_t *
* @phl_sta: information is updated through phl_station_info.
* @tx_ok_cnt: buffer address that we used to store tx ok statistics.
 * @qsel: indicates which AC queue, or fetch all via PHL_AC_QUEUE_TOTAL
*
* Return rtw_phl_get_tx_ok_rpt's return value in enum rtw_phl_status type.
*/
enum rtw_phl_status
rtw_phl_get_tx_ok_rpt(void *phl, struct rtw_phl_stainfo_t *phl_sta, u32 *tx_ok_cnt,
enum phl_ac_queue qsel)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status phl_sts = RTW_PHL_STATUS_SUCCESS;
struct rtw_hal_stainfo_t *hal_sta;
if(phl_sta) {
hal_sta = phl_sta->hal_sta;
if (tx_ok_cnt && qsel <= PHL_AC_QUEUE_TOTAL) {
if (qsel == PHL_AC_QUEUE_TOTAL) {
/* copy all AC counter */
tx_ok_cnt[PHL_BE_QUEUE_SEL] = hal_sta->trx_stat.wp_rpt_stats[PHL_BE_QUEUE_SEL].tx_ok_cnt;
tx_ok_cnt[PHL_BK_QUEUE_SEL] = hal_sta->trx_stat.wp_rpt_stats[PHL_BK_QUEUE_SEL].tx_ok_cnt;
tx_ok_cnt[PHL_VI_QUEUE_SEL] = hal_sta->trx_stat.wp_rpt_stats[PHL_VI_QUEUE_SEL].tx_ok_cnt;
tx_ok_cnt[PHL_VO_QUEUE_SEL] = hal_sta->trx_stat.wp_rpt_stats[PHL_VO_QUEUE_SEL].tx_ok_cnt;
/* reset all counter */
_os_spinlock(phl_to_drvpriv(phl_info), &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
hal_sta->trx_stat.wp_rpt_stats[PHL_BE_QUEUE_SEL].tx_ok_cnt = 0;
hal_sta->trx_stat.wp_rpt_stats[PHL_BK_QUEUE_SEL].tx_ok_cnt = 0;
hal_sta->trx_stat.wp_rpt_stats[PHL_VI_QUEUE_SEL].tx_ok_cnt = 0;
hal_sta->trx_stat.wp_rpt_stats[PHL_VO_QUEUE_SEL].tx_ok_cnt = 0;
_os_spinunlock(phl_to_drvpriv(phl_info), &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
} else {
/*copy target AC queue counter*/
*tx_ok_cnt = hal_sta->trx_stat.wp_rpt_stats[qsel].tx_ok_cnt;
/* reset target AC queue counter */
_os_spinlock(phl_to_drvpriv(phl_info), &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
hal_sta->trx_stat.wp_rpt_stats[qsel].tx_ok_cnt = 0;
_os_spinunlock(phl_to_drvpriv(phl_info), &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
}
} else {
phl_sts = RTW_PHL_STATUS_FAILURE;
PHL_ERR("tx_ok_cnt = %p, qsel = %d\n", tx_ok_cnt, qsel);
}
} else {
phl_sts = RTW_PHL_STATUS_FAILURE;
PHL_ERR("PHL STA NULL.\n");
}
return phl_sts;
}
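/*
 * Illustrative sketch (not part of the original driver): fetching the tx-ok
 * counters of every AC queue in one call. The buffer must hold
 * PHL_AC_QUEUE_TOTAL entries because the function above indexes it with
 * PHL_BE/BK/VI/VO_QUEUE_SEL. The counters are assumed to have been populated
 * via a prior rtw_phl_txsts_rpt_config() request, and this read clears them.
 * The helper name below is hypothetical and is not referenced by the driver.
 */
static void phl_example_read_tx_ok(void *phl, struct rtw_phl_stainfo_t *sta)
{
	u32 tx_ok[PHL_AC_QUEUE_TOTAL] = {0};

	if (rtw_phl_get_tx_ok_rpt(phl, sta, tx_ok, PHL_AC_QUEUE_TOTAL) !=
	    RTW_PHL_STATUS_SUCCESS)
		return;
	PHL_DBG("tx ok BE/BK/VI/VO = %d/%d/%d/%d\n",
		tx_ok[PHL_BE_QUEUE_SEL], tx_ok[PHL_BK_QUEUE_SEL],
		tx_ok[PHL_VI_QUEUE_SEL], tx_ok[PHL_VO_QUEUE_SEL]);
}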
static u32 rtw_phl_get_hw_tx_fail_cnt(struct rtw_hal_stainfo_t *hal_sta,
				enum phl_ac_queue qsel)
{
u32 total = 0;
if (hal_sta) {
		total = hal_sta->trx_stat.wp_rpt_stats[qsel].rty_fail_cnt +
			hal_sta->trx_stat.wp_rpt_stats[qsel].lifetime_drop_cnt +
			hal_sta->trx_stat.wp_rpt_stats[qsel].macid_drop_cnt;
}
return total;
}
static void rtw_phl_reset_tx_fail_cnt(struct phl_info_t *phl_info,
	struct rtw_hal_stainfo_t *hal_sta, enum phl_ac_queue qsel)
{
if (hal_sta) {
_os_spinlock(phl_to_drvpriv(phl_info), &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
hal_sta->trx_stat.wp_rpt_stats[qsel].rty_fail_cnt = 0;
hal_sta->trx_stat.wp_rpt_stats[qsel].lifetime_drop_cnt = 0;
hal_sta->trx_stat.wp_rpt_stats[qsel].macid_drop_cnt = 0;
_os_spinunlock(phl_to_drvpriv(phl_info), &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
}
}
/**
* rtw_phl_get_tx_fail_rpt() - get tx fail info.
* @phl: struct phl_info_t *
* @phl_sta: information is updated through phl_station_info.
* @tx_fail_cnt: buffer address that we used to store tx fail statistics.
 * @qsel: indicates which AC queue, or fetch all via PHL_AC_QUEUE_TOTAL
*
* Return rtw_phl_get_tx_fail_rpt's return value in enum rtw_phl_status type.
*/
enum rtw_phl_status
rtw_phl_get_tx_fail_rpt(void *phl, struct rtw_phl_stainfo_t *phl_sta, u32 *tx_fail_cnt,
enum phl_ac_queue qsel)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_phl_status phl_sts = RTW_PHL_STATUS_SUCCESS;
struct rtw_hal_stainfo_t *hal_sta;
if(phl_sta) {
hal_sta = phl_sta->hal_sta;
if (tx_fail_cnt && qsel <= PHL_AC_QUEUE_TOTAL) {
if (qsel == PHL_AC_QUEUE_TOTAL) {
/* copy all AC counter */
tx_fail_cnt[PHL_BE_QUEUE_SEL] = rtw_phl_get_hw_tx_fail_cnt(hal_sta, PHL_BE_QUEUE_SEL);
tx_fail_cnt[PHL_BK_QUEUE_SEL] = rtw_phl_get_hw_tx_fail_cnt(hal_sta, PHL_BK_QUEUE_SEL);
tx_fail_cnt[PHL_VI_QUEUE_SEL] = rtw_phl_get_hw_tx_fail_cnt(hal_sta, PHL_VI_QUEUE_SEL);
tx_fail_cnt[PHL_VO_QUEUE_SEL] = rtw_phl_get_hw_tx_fail_cnt(hal_sta, PHL_VO_QUEUE_SEL);
/* reset all counter */
rtw_phl_reset_tx_fail_cnt(phl_info, hal_sta, PHL_BE_QUEUE_SEL);
rtw_phl_reset_tx_fail_cnt(phl_info, hal_sta, PHL_BK_QUEUE_SEL);
rtw_phl_reset_tx_fail_cnt(phl_info, hal_sta, PHL_VI_QUEUE_SEL);
rtw_phl_reset_tx_fail_cnt(phl_info, hal_sta, PHL_VO_QUEUE_SEL);
} else {
/*copy target AC queue counter*/
tx_fail_cnt[qsel] = rtw_phl_get_hw_tx_fail_cnt(hal_sta, qsel);
/* reset target AC queue counter */
rtw_phl_reset_tx_fail_cnt(phl_info, hal_sta, qsel);
}
} else {
phl_sts = RTW_PHL_STATUS_FAILURE;
PHL_ERR("tx_fail_cnt = %p, qsel = %d\n", tx_fail_cnt, qsel);
}
} else {
phl_sts = RTW_PHL_STATUS_FAILURE;
PHL_ERR("PHL STA NULL.\n");
}
return phl_sts;
}
/**
* rtw_phl_get_tx_retry_rpt() - get tx retry info.
* @phl: struct phl_info_t *
* @phl_sta: information is updated through phl_station_info.
 * @tx_retry_cnt: buffer address that we used to store tx retry statistics.
 * @qsel: indicates which AC queue, or fetch all via PHL_AC_QUEUE_TOTAL
*
* Return rtw_phl_get_tx_retry_rpt's return value in enum rtw_phl_status type.
*/
enum rtw_phl_status
rtw_phl_get_tx_retry_rpt(void *phl, struct rtw_phl_stainfo_t *phl_sta, u32 *tx_retry_cnt,
enum phl_ac_queue qsel)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv = phl_to_drvpriv(phl_info);
enum rtw_phl_status phl_sts = RTW_PHL_STATUS_SUCCESS;
struct rtw_hal_stainfo_t *hal_sta;
if(phl_sta) {
hal_sta = phl_sta->hal_sta;
if (tx_retry_cnt && qsel <= PHL_AC_QUEUE_TOTAL) {
if (qsel == PHL_AC_QUEUE_TOTAL) {
/* copy all AC counter */
_os_mem_cpy(drv, tx_retry_cnt, hal_sta->ra_info.tx_retry_cnt,
sizeof(u32)*PHL_AC_QUEUE_TOTAL);
/* reset all counter */
/* TODO: Here needs lock, and so does halbb_get_txsts_rpt */
_os_mem_set(drv, hal_sta->ra_info.tx_retry_cnt, 0, sizeof(u32)*PHL_AC_QUEUE_TOTAL);
} else {
/*copy target AC queue counter*/
*tx_retry_cnt = hal_sta->ra_info.tx_retry_cnt[qsel];
/* reset target AC queue counter */
/* TODO: Here needs lock, and so does halbb_get_txsts_rpt */
hal_sta->ra_info.tx_retry_cnt[qsel] = 0;
}
} else {
phl_sts = RTW_PHL_STATUS_FAILURE;
PHL_ERR("tx_retry_cnt = %p, qsel = %d\n", tx_retry_cnt, qsel);
}
} else {
phl_sts = RTW_PHL_STATUS_FAILURE;
PHL_ERR("PHL STA NULL.\n");
}
return phl_sts;
}
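/*
 * Illustrative sketch (not part of the original driver): reading (and thereby
 * clearing) the retry counter of a single AC queue instead of all of them.
 * The helper name below is hypothetical and is not referenced by the driver.
 */
static void phl_example_read_vo_retry(void *phl, struct rtw_phl_stainfo_t *sta)
{
	u32 vo_retry = 0;

	if (rtw_phl_get_tx_retry_rpt(phl, sta, &vo_retry, PHL_VO_QUEUE_SEL) ==
	    RTW_PHL_STATUS_SUCCESS)
		PHL_DBG("VO retry cnt = %d\n", vo_retry);
}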
#endif /* CONFIG_USB_HCI */
/*
* Get next idx
*/
u8 _get_fidx(u8 num, u8 cur_idx)
{
u8 idx = 0;
if (num == 0)
idx = cur_idx;
else {
idx = cur_idx + 1;
if (idx >= MAX_STORE_BCN_NUM)
idx = 0;
}
return idx;
}
/*
* Get previous idx
*/
u8 _get_bidx(u8 num, u8 cur_idx)
{
u8 idx = 0;
if (cur_idx == 0) {
idx = num - 1;
} else {
idx = cur_idx - 1;
}
return idx;
}
void _phl_sta_up_bcn_offset_info(struct phl_info_t *phl,
struct rtw_rx_bcn_info *bcn_i, u16 bcn_intvl)
{
struct rtw_bcn_offset *offset_i = &bcn_i->offset_i;
u16 offset = bcn_intvl;
u16 similar_th = 2;/*Unit: TU*/
u64 diff = 0;
u8 idx = 0, jdx = 0, cur_idx = 0, bidx = 0, start_idx = 0;
if (bcn_i->num == 1) {
offset_i->offset = (u16)bcn_i->info[1][bcn_i->idx];
offset_i->conf_lvl = CONF_LVL_LOW;
		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "_phl_sta_up_bcn_offset_info(): bcn_i->num == 1, conf_lvl = CONF_LVL_LOW, offset(%d)\n",
offset_i->offset);
goto exit;
}
cur_idx = bcn_i->idx;
start_idx = cur_idx;
for (idx = 0; idx < bcn_i->num; idx++) {
bidx = cur_idx;
for (jdx = 1; jdx < bcn_i->num; jdx++) {
bidx = _get_bidx(bcn_i->num, bidx);
if (start_idx == bidx)
break;
diff = bcn_i->info[0][cur_idx] - bcn_i->info[0][bidx];
diff = _os_division64(
_os_modular64(diff, bcn_intvl * TU), TU);
			/* e.g. diff = 99 with bcn_intvl = 100 is a similar case,
			 * as is diff = 2 with bcn_intvl = 100
			 */
if (!((diff < similar_th) ||
((bcn_intvl - diff) < similar_th))) {
continue;
}
if (offset > bcn_i->info[1][cur_idx])
offset = (u16)bcn_i->info[1][cur_idx];
if (offset > bcn_i->info[1][bidx])
offset = (u16)bcn_i->info[1][bidx];
}
cur_idx = _get_bidx(bcn_i->num, cur_idx);
}
if (offset != bcn_intvl) {
offset_i->conf_lvl = CONF_LVL_HIGH;
if (offset < offset_i->offset) {
offset_i->offset = offset;
}
goto exit;
}
for (idx = 0; idx < bcn_i->num; idx++) {
if (bcn_i->info[1][idx] < offset_i->offset) {
offset_i->offset = (u16)bcn_i->info[1][idx];
offset_i->conf_lvl = CONF_LVL_MID;
}
}
exit:
	/*
	 * A small offset may just be environmental jitter: if the offset is
	 * below 5% of bcn_intvl, treat it as 0.
	 */
if ((offset_i->offset != 0) &&
(offset_i->offset < ((bcn_intvl * 5) / 100))) {
PHL_TRACE(COMP_PHL_MCC, _PHL_WARNING_, "_phl_sta_up_bcn_offset_info(): offset(%d) < (%d), set offset = 0\n",
offset_i->offset, (bcn_intvl * 5) / 100);
offset_i->offset = 0;
}
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "_phl_sta_up_bcn_offset_info(): bcn num(%d), offset(%d), conf_lvl(%d), current CR(%d)\n",
bcn_i->num, offset_i->offset, offset_i->conf_lvl, offset_i->cr_tbtt_shift);
return;
}
void rtw_phl_sta_up_rx_bcn(void *phl, struct rtw_bcn_pkt_info *info)
{
struct rtw_rx_bcn_info *bcn_i = &info->sta->bcn_i;
u16 bcn_intvl = info->sta->asoc_cap.bcn_interval;
if (bcn_intvl == 0) {
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "bcn_intvl == 0");
return;
}
bcn_i->idx = _get_fidx(bcn_i->num, bcn_i->idx);
if (bcn_i->num < MAX_STORE_BCN_NUM)
bcn_i->num++;
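	/* info[0]: beacon TSF, info[1]: TSF offset from the nominal TBTT in TU,
	 * info[2]: HW TSF when the beacon was received
	 */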
bcn_i->info[0][bcn_i->idx] = info->tsf;
bcn_i->info[1][bcn_i->idx] = _os_division64(
_os_modular64(info->tsf, bcn_intvl * TU), TU);
bcn_i->info[2][bcn_i->idx] = info->hw_tsf;
_phl_sta_up_bcn_offset_info(phl, bcn_i, bcn_intvl);
}
void phl_clean_sta_bcn_info(struct phl_info_t *phl, struct rtw_phl_stainfo_t *sta)
{
void *priv = phl_to_drvpriv(phl);
struct rtw_rx_bcn_info *bcn_i = &sta->bcn_i;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "phl_clean_sta_bcn_info(): sta->wrole->id(%d)\n",
sta->wrole->id);
_os_mem_set(priv, bcn_i, 0, sizeof(struct rtw_rx_bcn_info));
}
struct rtw_bcn_offset * phl_get_sta_bcn_offset_info(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole)
{
struct rtw_phl_stainfo_t *sta = rtw_phl_get_stainfo_self(phl, wrole);
struct rtw_bcn_offset *offset_i = &sta->bcn_i.offset_i;
return offset_i;
}
void phl_bcn_watchdog(struct phl_info_t *phl)
{
u8 ridx = MAX_WIFI_ROLE_NUMBER;
struct rtw_wifi_role_t *wrole = NULL;
struct rtw_bcn_offset *b_ofst_i = NULL;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
for (ridx = 0; ridx < MAX_WIFI_ROLE_NUMBER; ridx++) {
wrole = phl_get_wrole_by_ridx(phl, ridx);
if (wrole == NULL)
continue;
if (rtw_phl_role_is_client_category(wrole) && wrole->mstate == MLME_LINKED) {
b_ofst_i = phl_get_sta_bcn_offset_info(phl, wrole);
if (b_ofst_i->conf_lvl >= CONF_LVL_MID &&
b_ofst_i->offset != b_ofst_i->cr_tbtt_shift) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s(): update bcn offset to %d TU\n",
__func__, b_ofst_i->offset);
hstatus = rtw_hal_role_cfg_ex(phl->hal, wrole, PCFG_TBTT_SHIFT, &(b_ofst_i->offset));
if (hstatus == RTW_HAL_STATUS_SUCCESS)
b_ofst_i->cr_tbtt_shift = b_ofst_i->offset;
else
PHL_ERR("%s(): role cfg fail, status: %d\n", __func__, hstatus);
}
}
}
}
/*
* calculate the value between current TSF and TBTT
* TSF 0 50 180 150 250
* TBTT ^ ^ ^
* Curr T |
* | 30 |
*
* TSF 0 80 120 180 280
* TBTT ^ ^ ^
* Curr T |
* | 40 |
* @wrole: specific role, we get bcn offset info from the role.
* @cur_t: current TSF
* @ofst: output value, unit: TU
*/
bool phl_calc_offset_from_tbtt(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, u64 cur_t, u16 *ofst)
{
struct rtw_bcn_offset *b_ofst_i = phl_get_sta_bcn_offset_info(phl, wrole);
struct rtw_phl_stainfo_t *sta = rtw_phl_get_stainfo_self(phl, wrole);
u64 b_ofst = b_ofst_i->offset;
u64 b_intvl = 0;
u32 mod = 0; /*TU*/
#ifdef RTW_PHL_BCN
if (phl_role_is_ap_category(wrole))
b_intvl = (u16)wrole->bcn_cmn.bcn_interval;
else
#endif
b_intvl = sta->asoc_cap.bcn_interval;
if (0 == b_intvl) {
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "phl_calc_offset_from_tbtt(): Fail, b_intvl ==0, wrole->id(%d), type(%d)\n",
wrole->id, wrole->type);
return false;
}
mod = (u32)_os_division64(_os_modular64(cur_t, b_intvl * TU), TU);
if (mod < b_ofst) {
*ofst = (u16)(mod + (b_intvl - b_ofst));
} else {
*ofst = (u16)(mod - b_ofst);
}
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "phl_calc_offset_from_tbtt(): wrole->id(%d), ofst(%d), cur_t: 0x%08x %08x modular(%d, TU), Bcn offset: conf_lvl(%d), offset(%d)\n",
wrole->id, *ofst, (u32)(cur_t >> 32), (u32)cur_t, mod,
b_ofst_i->conf_lvl, (u32)b_ofst);
return true;
}
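/*
 * Worked example (illustrative, not from the original source): with
 * b_intvl = 100 TU and a learned beacon offset b_ofst = 20 TU, a current TSF
 * whose position inside the interval is mod = 30 TU gives
 * *ofst = 30 - 20 = 10 TU since the last (shifted) TBTT; if mod = 5 TU the
 * wrap-around branch gives *ofst = 5 + (100 - 20) = 85 TU.
 */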
/*
 * Synchronize the TBTT of the target role with the TBTT of the source role.
 * Assume the TBTT of the target role is located where Mod(Tgt TSF) = 0.
 * @sync_ofst: offset between the TBTT of the target role and the TBTT of the source role. Unit: TU
 * @sync_now_once: perform a single synchronization right now.
 * @*diff_t: output diff_tsf. Unit: TU
 */
enum rtw_phl_status rtw_phl_tbtt_sync(struct phl_info_t *phl,
struct rtw_wifi_role_t *src_role,
struct rtw_wifi_role_t *tgt_role,
u16 sync_ofst, bool sync_now_once, u16 *diff_t)
{
enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
u32 tsf_h = 0, tsf_l = 0;
u64 tsf = 0, tgt_tsf = 0, bcn_intvl = 0;
u16 ofst = 0;
u64 diff_tsf = 0;
enum hal_tsf_sync_act act = sync_now_once ? HAL_TSF_SYNC_NOW_ONCE :
HAL_TSF_EN_SYNC_AUTO;
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_get_tsf(phl->hal,
src_role->hw_port, &tsf_h, &tsf_l)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "rtw_phl_tbtt_sync(): Get tsf fail, src_role->id(%d)\n",
src_role->id);
goto exit;
}
bcn_intvl = phl_role_get_bcn_intvl(phl, tgt_role);
if (bcn_intvl == 0) {
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "rtw_phl_tbtt_sync(): bcn_intvl == 0, tgt_role->id(%d)\n",
tgt_role->id);
goto exit;
}
tsf = tsf_h;
tsf = tsf << 32;
tsf |= tsf_l;
/*calculate the value between current TSF and TBTT*/
phl_calc_offset_from_tbtt(phl, src_role, tsf, &ofst);
tgt_tsf = (tsf + sync_ofst * TU) - ofst * TU;
/*Find diff_tsf, let Mod((tgt_tsf + diff_tsf), bcn_intvl) = 0*/
diff_tsf = bcn_intvl * TU - _os_modular64(tgt_tsf, bcn_intvl * TU);
diff_tsf = _os_division64(diff_tsf, TU);
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "rtw_phl_tbtt_sync(): diff_tsf(%d), sync_ofst(%d), ofst(%d)\n",
(u32)diff_tsf, sync_ofst, (u32)ofst);
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_tsf_sync(phl->hal,
src_role->hw_port, tgt_role->hw_port,
src_role->hw_band, (s32)diff_tsf,
act)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "rtw_phl_tbtt_sync(): Sync tsf fail\n");
goto exit;
}
if (RTW_HAL_STATUS_SUCCESS == rtw_hal_get_tsf(phl->hal,
src_role->hw_port, &tsf_h, &tsf_l)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "rtw_phl_tbtt_sync(): tsf_src(0x%08x %08x)\n",
tsf_h, tsf_l);
}
if (RTW_HAL_STATUS_SUCCESS == rtw_hal_get_tsf(phl->hal,
tgt_role->hw_port, &tsf_h, &tsf_l)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "rtw_phl_tbtt_sync(): tsf_tgt(0x%08x %08x)\n",
tsf_h, tsf_l);
}
*diff_t = (u16)diff_tsf;
status = RTW_PHL_STATUS_SUCCESS;
exit:
return status;
}
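/*
 * Worked example (illustrative, not from the original source): assume the
 * source role sits ofst = 30 TU past its own TBTT, sync_ofst = 0 and the
 * target role beacons every bcn_intvl = 100 TU. Then tgt_tsf = tsf - 30 TU,
 * i.e. the TSF value at the source TBTT, and if that value lands 70 TU into
 * a 100 TU interval the routine programs diff_tsf = 100 - 70 = 30 TU so that
 * the target TBTT falls on a multiple of its beacon interval, sync_ofst TU
 * after the source TBTT.
 */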
| 2301_81045437/rtl8852be | phl/phl_sta.c | C | agpl-3.0 | 70,524 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_STA_H_
#define _PHL_STA_H_
/*********** macid ctrl section ***********/
enum rtw_phl_status
phl_macid_ctrl_init(struct phl_info_t *phl);
enum rtw_phl_status
phl_macid_ctrl_deinit(struct phl_info_t *phl);
u16 rtw_phl_get_macid_max_num(void *phl);
u16 rtw_phl_wrole_bcmc_id_get(void *phl, struct rtw_wifi_role_t *wrole);
u8 rtw_phl_macid_is_bmc(void *phl, u16 macid);
u8 rtw_phl_macid_is_used(void *phl, u16 macid);
static inline bool
phl_macid_is_valid(struct phl_info_t *phl_info, u16 macid)
{
return (macid < phl_info->macid_ctrl.max_num) ? true : false;
}
u16 rtw_phl_get_macid_by_addr(void *phl, u8 *addr);
/*********** stainfo_ctrl section ***********/
enum rtw_phl_status
phl_stainfo_ctrl_init(struct phl_info_t *phl_info);
enum rtw_phl_status
phl_stainfo_ctrl_deinie(struct phl_info_t *phl_info);
void phl_clean_sta_bcn_info(struct phl_info_t *phl, struct rtw_phl_stainfo_t *sta);
struct rtw_bcn_offset * phl_get_sta_bcn_offset_info(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole);
void phl_bcn_watchdog(struct phl_info_t *phl);
bool phl_calc_offset_from_tbtt(struct phl_info_t *phl,
struct rtw_wifi_role_t *wrole, u64 cur_t, u16 *ofst);
enum rtw_phl_status rtw_phl_tbtt_sync(struct phl_info_t *phl,
struct rtw_wifi_role_t *src_role,
struct rtw_wifi_role_t *tgt_role,
u16 sync_ofst, bool sync_now_once, u16 *diff_t);
#ifdef DBG_PHL_STAINFO
void phl_dump_stactrl(const char *caller,
const int line,
bool show_caller,
struct phl_info_t *phl_info);
#define PHL_DUMP_STACTRL(_phl_info) phl_dump_stactrl(__FUNCTION__, __LINE__, false, _phl_info);
#define PHL_DUMP_STACTRL_EX(_phl_info) phl_dump_stactrl(__FUNCTION__, __LINE__, true, _phl_info);
void phl_dump_stainfo_all(const char *caller,
const int line, bool show_caller,
struct phl_info_t *phl_info);
#define PHL_DUMP_STAINFO(_phl_info) phl_dump_stainfo_all(__FUNCTION__, __LINE__, false, _phl_info);
#define PHL_DUMP_STAINFO_EX(_phl_info) phl_dump_stainfo_all(__FUNCTION__, __LINE__, true, _phl_info);
void phl_dump_stainfo_per_role(const char *caller,
const int line,
bool show_caller,
struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole);
#define PHL_DUMP_ROLE_STAINFO(_phl_info, wrole) phl_dump_stainfo_per_role(__FUNCTION__, __LINE__, false, _phl_info, wrole);
#define PHL_DUMP_ROLE_STAINFO_EX(_phl_info, wrole) phl_dump_stainfo_per_role(__FUNCTION__, __LINE__, true, _phl_info, wrole);
#else
#define PHL_DUMP_STACTRL(_phl_info)
#define PHL_DUMP_STACTRL_EX(_phl_info)
#define PHL_DUMP_STAINFO(_phl_info)
#define PHL_DUMP_STAINFO_EX(_phl_info)
#define PHL_DUMP_ROLE_STAINFO(_phl_info, wrole)
#define PHL_DUMP_ROLE_STAINFO_EX(_phl_info, wrole)
#endif
/*********** phl stainfo section ***********/
/*WIFI sta_info management section*/
struct rtw_phl_stainfo_t *
phl_alloc_stainfo_sw(struct phl_info_t *phl_info,
u8 *sta_addr,
struct rtw_wifi_role_t *wrole);
enum rtw_phl_status
phl_alloc_stainfo_hw(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta);
enum rtw_phl_status
phl_free_stainfo_sw(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta);
enum rtw_phl_status
phl_free_stainfo_hw(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta);
enum rtw_phl_status
rtw_phl_cmd_alloc_stainfo(void *phl,
struct rtw_phl_stainfo_t **sta,
u8 *sta_addr,
struct rtw_wifi_role_t *wrole,
bool alloc, bool only_hw,
enum phl_cmd_type cmd_type,
u32 cmd_timeout);
enum rtw_phl_status
phl_wifi_role_free_stainfo_hw(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wrole);
enum rtw_phl_status
phl_wifi_role_free_stainfo_sw(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *role);
enum rtw_phl_status
phl_wifi_role_free_stainfo(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *role);
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_update_media_status_hdl(struct phl_info_t *phl_info, u8 *param);
#endif
struct rtw_phl_stainfo_t *
rtw_phl_get_stainfo_by_macid(void *phl, u16 macid);
struct rtw_phl_stainfo_t *
rtw_phl_get_stainfo_by_addr(void *phl, struct rtw_wifi_role_t *wrole, u8 *addr);
struct rtw_phl_stainfo_t *
rtw_phl_get_stainfo_by_addr_ex(void *phl, u8 *addr);
struct rtw_phl_stainfo_t *
rtw_phl_get_stainfo_self(void *phl, struct rtw_wifi_role_t *wrole);
u8 phl_get_min_rssi_bcn(struct phl_info_t *phl_info);
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_cmd_change_stainfo_hdl(struct phl_info_t *phl_info, u8 *param);
#endif
enum rtw_phl_status
phl_change_stainfo(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *sta,
enum phl_upd_mode mode);
void
phl_sta_trx_tfc_upd(struct phl_info_t *phl_info);
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_cmd_set_key_hdl(struct phl_info_t *phl_info, u8 *param);
enum rtw_phl_status
phl_cmd_alloc_stainfo_hdl(struct phl_info_t *phl_info, u8 *param);
#endif /* CONFIG_CMD_DISP */
#endif /*_PHL_STA_H_*/
| 2301_81045437/rtl8852be | phl/phl_sta.h | C | agpl-3.0 | 5,850 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_STATUS_H_
#define _PHL_STATUS_H_
enum rtw_phl_status {
RTW_PHL_STATUS_SUCCESS, /* 0 */
RTW_PHL_STATUS_FAILURE, /* 1 */
RTW_PHL_STATUS_RESOURCE, /* 2 */
RTW_PHL_STATUS_HAL_INIT_FAILURE, /* 3 */
RTW_PHL_STATUS_PENDING, /* 4 */
RTW_PHL_STATUS_FRAME_DROP, /* 5 */
RTW_PHL_STATUS_INVALID_PARAM, /* 6 */
RTW_PHL_STATUS_CMD_TIMEOUT, /* 7 */
RTW_PHL_STATUS_CMD_ERROR, /* 8 */
RTW_PHL_STATUS_CMD_DROP, /* 9 */
RTW_PHL_STATUS_CMD_CANNOT_IO, /* 10 */
RTW_PHL_STATUS_CMD_SUCCESS, /* 11 */
RTW_PHL_STATUS_UNEXPECTED_ERROR, /* 12 */
RTW_PHL_STATUS_CANNOT_IO, /* 13 */
};
#define is_cmd_failure(psts) ((psts == RTW_PHL_STATUS_CMD_TIMEOUT) || \
(psts == RTW_PHL_STATUS_CMD_ERROR) || \
(psts == RTW_PHL_STATUS_CMD_DROP) || \
(psts == RTW_PHL_STATUS_CMD_CANNOT_IO))
enum phl_mdl_ret_code {
MDL_RET_SUCCESS = 0,
MDL_RET_FAIL,
MDL_RET_IGNORE,
MDL_RET_PENDING,
MDL_RET_CANNOT_IO,
};
#endif /*_PHL_STATUS_H_*/
| 2301_81045437/rtl8852be | phl/phl_status.h | C | agpl-3.0 | 1,589 |
/******************************************************************************
*
* Copyright(c) 2019 - 2021 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_STRUCT_H_
#define _PHL_STRUCT_H_
#define PHL_MACID_MAX_ARRAY_NUM 8 /* 8x32=256 */
#define PHL_MACID_MAX_NUM (PHL_MACID_MAX_ARRAY_NUM * 32)
#define PHL_STA_TID_NUM (16) /* TODO: */
struct hci_info_t {
/* enum rtw_hci_type hci_type; */
#if defined(CONFIG_PCI_HCI)
u8 total_txch_num;
u8 total_rxch_num;
u8 *txbd_buf;
u8 *rxbd_buf;
#if defined(PCIE_TRX_MIT_EN)
u8 fixed_mitigation; /*no watchdog dynamic setting*/
#endif
#elif defined(CONFIG_USB_HCI)
u16 usb_bulkout_size;
#elif defined(CONFIG_SDIO_HCI)
u32 tx_drop_cnt; /* bit31 means overflow or not */
#ifdef SDIO_TX_THREAD
_os_sema tx_thrd_sema;
_os_thread tx_thrd;
#endif /* SDIO_TX_THREAD */
#endif
u8 *wd_ring;
u8 *txbuf_pool;
u8 *rxbuf_pool;
u8 *wp_tag;
u16 wp_seq[PHL_MACID_MAX_NUM]; /* maximum macid number */
};
#define MAX_PHL_RING_STATUS_NUMBER 64
#define RX_REORDER_RING_NUMBER PHL_MACID_MAX_NUM
#define PCIE_BUS_EFFICIENCY 4
#define ETH_ALEN 6
struct phl_ring_status {
_os_list list;
u16 macid;
u8 band;/*0 or 1*/
u8 wmm;/*0 or 1*/
u8 port;
/*u8 mbssid*/
u16 req_busy;
struct rtw_phl_tx_ring *ring_ptr;
};
struct phl_ring_sts_pool {
struct phl_ring_status ring_sts[MAX_PHL_RING_STATUS_NUMBER];
_os_list idle;
_os_list busy;
_os_lock idle_lock;
_os_lock busy_lock;
};
/**
* struct phl_hci_trx_ops - interface specific operations
*
* @hci_trx_init: the function for HCI trx init
* @hci_trx_deinit: the function for HCI trx deinit
* @prepare_tx: prepare packets for hal transmission
* @recycle_rx_buf: recycle rx buffer
* @tx: tx packet to hw
* @rx: rx packet to sw
*/
struct phl_info_t;
struct phl_hci_trx_ops {
enum rtw_phl_status (*hci_trx_init)(struct phl_info_t *phl);
void (*hci_trx_deinit)(struct phl_info_t *phl);
enum rtw_phl_status (*prepare_tx)(struct phl_info_t *phl,
struct rtw_xmit_req *tx_req);
enum rtw_phl_status (*recycle_rx_buf)(struct phl_info_t *phl,
void *r, u8 ch, enum rtw_rx_type type);
enum rtw_phl_status (*tx)(struct phl_info_t *phl);
enum rtw_phl_status (*rx)(struct phl_info_t *phl);
enum rtw_phl_status (*trx_cfg)(struct phl_info_t *phl);
void (*trx_stop)(struct phl_info_t *phl);
enum rtw_phl_status (*pltfm_tx)(struct phl_info_t *phl, void *pkt);
void (*free_h2c_pkt_buf)(struct phl_info_t *phl_info,
struct rtw_h2c_pkt *_h2c_pkt);
enum rtw_phl_status (*alloc_h2c_pkt_buf)(struct phl_info_t *phl_info,
struct rtw_h2c_pkt *_h2c_pkt, u32 buf_len);
void (*trx_reset)(struct phl_info_t *phl, u8 type);
void (*trx_resume)(struct phl_info_t *phl, u8 type);
void (*req_tx_stop)(struct phl_info_t *phl);
void (*req_rx_stop)(struct phl_info_t *phl);
bool (*is_tx_pause)(struct phl_info_t *phl);
bool (*is_rx_pause)(struct phl_info_t *phl);
void *(*get_txbd_buf)(struct phl_info_t *phl);
void *(*get_rxbd_buf)(struct phl_info_t *phl);
void (*recycle_rx_pkt)(struct phl_info_t *phl,
struct rtw_phl_rx_pkt *phl_rx);
enum rtw_phl_status (*register_trx_hdlr)(struct phl_info_t *phl);
void (*rx_handle_normal)(struct phl_info_t *phl_info,
struct rtw_phl_rx_pkt *phl_rx);
void (*tx_watchdog)(struct phl_info_t *phl_info);
#ifdef CONFIG_PCI_HCI
enum rtw_phl_status (*recycle_busy_wd)(struct phl_info_t *phl);
enum rtw_phl_status (*recycle_busy_h2c)(struct phl_info_t *phl);
#endif
#ifdef CONFIG_USB_HCI
enum rtw_phl_status (*pend_rxbuf)(struct phl_info_t *phl, void *rxobj,
u32 inbuf_len, u8 status_code);
enum rtw_phl_status (*recycle_tx_buf)(void *phl, u8 *tx_buf_ptr);
#endif
#if defined(CONFIG_SDIO_HCI) && defined(CONFIG_PHL_SDIO_READ_RXFF_IN_INT)
enum rtw_phl_status (*recv_rxfifo)(struct phl_info_t *phl);
#endif
};
/**
* struct phl_tid_ampdu_rx - TID aggregation information (Rx).
*
* @reorder_buf: buffer to reorder incoming aggregated MPDUs.
* @reorder_time: time when frame was added
* @sta: station we are attached to
* @head_seq_num: head sequence number in reordering buffer.
* @stored_mpdu_num: number of MPDUs in reordering buffer
* @ssn: Starting Sequence Number expected to be aggregated.
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
* @tid: TID number
* @started: this session has started (head ssn or higher was received)
*/
struct phl_tid_ampdu_rx {
struct rtw_phl_rx_pkt **reorder_buf;
u32 *reorder_time;
struct rtw_phl_stainfo_t *sta;
u16 head_seq_num;
u16 stored_mpdu_num;
u16 ssn;
u16 buf_size;
u16 tid;
u8 started:1,
removed:1;
void *drv_priv;
struct phl_info_t *phl_info;
};
struct macid_ctl_t {
_os_lock lock;
/* used macid bitmap share for all wifi role */
u32 used_map[PHL_MACID_MAX_ARRAY_NUM];
/* record bmc macid bitmap for all wifi role */
u32 bmc_map[PHL_MACID_MAX_ARRAY_NUM];
/* record used macid bitmap for each wifi role */
u32 wifi_role_usedmap[MAX_WIFI_ROLE_NUMBER][PHL_MACID_MAX_ARRAY_NUM];
/* record bmc TX macid for wifi role */
u16 wrole_bmc[MAX_WIFI_ROLE_NUMBER];
/* record total stainfo by macid */
struct rtw_phl_stainfo_t *sta[PHL_MACID_MAX_NUM];
u16 max_num;
};
struct stainfo_ctl_t {
struct phl_info_t *phl_info;
u8 *allocated_stainfo_buf;
int allocated_stainfo_sz;
u8 *stainfo_buf;
struct phl_queue free_sta_queue;
};
struct phl_h2c_pkt_pool {
struct rtw_h2c_pkt *h2c_pkt_buf;
struct phl_queue idle_h2c_pkt_cmd_list;
struct phl_queue idle_h2c_pkt_data_list;
struct phl_queue idle_h2c_pkt_ldata_list;
struct phl_queue busy_h2c_pkt_list;
_os_lock recycle_lock;
};
#ifdef CONFIG_RTW_ACS
#ifndef MAX_CHANNEL_NUM
#define MAX_CHANNEL_NUM 42
#endif
struct auto_chan_sel {
u8 clm_ratio[MAX_CHANNEL_NUM];
u8 nhm_pwr[MAX_CHANNEL_NUM];
u8 curr_idx;
u16 chset[MAX_CHANNEL_NUM];
};
#endif
enum phl_tx_status {
PHL_TX_STATUS_IDLE = 0,
PHL_TX_STATUS_RUNNING = 1,
PHL_TX_STATUS_STOP_INPROGRESS = 2,
PHL_TX_STATUS_SW_PAUSE = 3,
PHL_TX_STATUS_MAX = 0xFF
};
enum phl_rx_status {
PHL_RX_STATUS_IDLE = 0,
PHL_RX_STATUS_RUNNING = 1,
PHL_RX_STATUS_STOP_INPROGRESS = 2,
PHL_RX_STATUS_SW_PAUSE = 3,
PHL_RX_STATUS_MAX = 0xFF
};
enum data_ctrl_mdl {
DATA_CTRL_MDL_NONE = 0,
DATA_CTRL_MDL_CMD_CTRLER = BIT0,
DATA_CTRL_MDL_SER = BIT1,
DATA_CTRL_MDL_PS = BIT2,
DATA_CTRL_MDL_MAX = BIT7
};
enum data_ctrl_err_code {
CTRL_ERR_SW_TX_PAUSE_POLLTO = 1,
CTRL_ERR_SW_TX_PAUSE_FAIL = 2,
CTRL_ERR_SW_TX_RESUME_FAIL = 3,
CTRL_ERR_SW_RX_PAUSE_POLLTO = 4,
CTRL_ERR_SW_RX_PAUSE_FAIL = 5,
CTRL_ERR_SW_RX_RESUME_FAIL = 6,
CTRL_ERR_HW_TRX_PAUSE_FAIL = 7,
CTRL_ERR_HW_TRX_RESUME_FAIL = 8,
CTRL_ERR_MAX = 0xFF
};
#define PHL_CTRL_TX BIT0
#define PHL_CTRL_RX BIT1
#define POLL_SW_TX_PAUSE_CNT 100
#define POLL_SW_TX_PAUSE_MS 5
#define POLL_SW_RX_PAUSE_CNT 100
#define POLL_SW_RX_PAUSE_MS 5
struct phl_info_t {
struct macid_ctl_t macid_ctrl;
struct stainfo_ctl_t sta_ctrl;
struct rtw_regulation regulation;
struct rtw_phl_com_t *phl_com;
struct rtw_phl_handler phl_tx_handler;
struct rtw_phl_handler phl_rx_handler;
struct rtw_phl_handler phl_event_handler;
struct rtw_phl_rx_ring phl_rx_ring;
_os_atomic phl_sw_tx_sts;
_os_atomic phl_sw_tx_more;
_os_atomic phl_sw_tx_req_pwr;
_os_atomic phl_sw_rx_sts;
_os_atomic phl_sw_rx_more;
_os_atomic phl_sw_rx_req_pwr;
_os_atomic is_hw_trx_pause;
enum data_ctrl_mdl pause_tx_id;
enum data_ctrl_mdl pause_rx_id;
_os_lock t_ring_list_lock;
_os_lock rx_ring_lock;
_os_lock t_fctrl_result_lock;
_os_lock t_ring_free_list_lock;
_os_list t_ring_list;
_os_list t_fctrl_result;
_os_list t_ring_free_list;
void *ring_sts_pool;
void *rx_pkt_pool;
struct phl_h2c_pkt_pool *h2c_pool;
struct hci_info_t *hci;
struct phl_hci_trx_ops *hci_trx_ops;
struct pkt_ofld_obj *pkt_ofld;
struct phl_cmd_dispatch_engine disp_eng;
struct phl_watchdog wdog;
void *msg_hub;
void *cmd_que;
void *hal;
#ifdef CONFIG_FSM
void *fsm_root;
void *cmd_fsm;
void *cmd_obj;
void *scan_fsm;
void *scan_obj;
void *ser_fsm;
void *ser_obj;
void *btc_fsm;
void *btc_obj;
void *snd_fsm;
#endif /*CONFIG_FSM*/
void *snd_obj;
void *ps_obj;
void *led_ctrl;
void *ecsa_ctrl;
void *phl_twt_info; /* struct phl_twt_info */
#ifdef PHL_RX_BATCH_IND
u8 rx_new_pending;
#endif
struct phl_wow_info wow_info;
#ifdef CONFIG_RTW_ACS
struct auto_chan_sel acs;
#endif
#ifdef CONFIG_PHL_TEST_SUITE
void *trx_test;
#endif
};
#define phl_to_drvpriv(_phl) (_phl->phl_com->drv_priv)
#define phlcom_to_test_mgnt(_phl_com) ((_phl_com)->test_mgnt)
#define phlcom_to_mr_ctrl(_phl_com) (&(_phl_com->mr_ctrl))
#define phl_to_mr_ctrl(_phl) (&(((struct phl_info_t *)_phl)->phl_com->mr_ctrl))
#define phl_to_mac_ctrl(_phlinfo) (&(_phlinfo->macid_ctrl))
#define phl_to_sta_ctrl(_phlinfo) (&(_phlinfo->sta_ctrl))
#define get_band_ctrl(_phl, _band) (&(phl_to_mr_ctrl(_phl)->band_ctrl[_band]))
#define phl_to_p2pps_info(_phl) (((_phl)->phl_com->p2pps_info))
#define get_role_idx(_wrole) (_wrole->id)
#endif /*_PHL_STRUCT_H_*/
| 2301_81045437/rtl8852be | phl/phl_struct.h | C | agpl-3.0 | 9,470 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
static void
_phl_sw_cap_para_init(
struct rtw_phl_com_t* phl_com, struct rtw_para_info_t *para_info)
{
para_info->para_src = RTW_PARA_SRC_INTNAL;
para_info->para_data = NULL;
para_info->para_data_len = 0;
}
static void
_phl_sw_cap_para_free(
struct rtw_phl_com_t* phl_com, struct rtw_para_info_t *para_info)
{
u32 buf_sz = MAX_HWCONFIG_FILE_CONTENT;
void *drv = phl_com->drv_priv;
if(para_info->para_data)
_os_mem_free(drv, para_info->para_data, buf_sz * sizeof(u32));
para_info->para_data = NULL;
para_info->para_data_len = 0;
}
static void
_phl_pwrlmt_para_init(
struct rtw_phl_com_t* phl_com, struct rtw_para_pwrlmt_info_t *para_info)
{
para_info->para_src = RTW_PARA_SRC_INTNAL;
para_info->para_data = NULL;
para_info->para_data_len = 0;
para_info->ext_regd_arridx = 0;
para_info->ext_reg_map_num = 0;
}
static void
_phl_pwrlmt_para_free(
struct rtw_phl_com_t* phl_com, struct rtw_para_pwrlmt_info_t *para_info)
{
u32 file_buf_sz = MAX_HWCONFIG_FILE_CONTENT;
u32 buf_sz = MAX_LINES_HWCONFIG_TXT;
void *drv = phl_com->drv_priv;
if(para_info->para_data)
_os_mem_free(drv, para_info->para_data, file_buf_sz * sizeof(u32));
para_info->para_data = NULL;
para_info->para_data_len = 0;
if(para_info->ext_reg_codemap)
_os_mem_free(drv, para_info->ext_reg_codemap, buf_sz * sizeof(u8));
para_info->ext_reg_codemap = NULL;
para_info->ext_reg_map_num = 0;
}
enum channel_width _phl_sw_cap_get_hi_bw(struct phy_cap_t *phy_cap)
{
enum channel_width bw = CHANNEL_WIDTH_20;
do {
if (phy_cap->bw_sup & BW_CAP_80_80M) {
bw = CHANNEL_WIDTH_80_80;
break;
} else if (phy_cap->bw_sup & BW_CAP_160M) {
bw = CHANNEL_WIDTH_160;
break;
} else if (phy_cap->bw_sup & BW_CAP_80M) {
bw = CHANNEL_WIDTH_80;
break;
} else if (phy_cap->bw_sup & BW_CAP_40M) {
bw = CHANNEL_WIDTH_40;
break;
} else if (phy_cap->bw_sup & BW_CAP_20M) {
bw = CHANNEL_WIDTH_20;
break;
}
} while (0);
return bw;
}
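/*
 * Illustrative worked example (not part of the original source): the helper
 * above resolves the highest supported bandwidth from the bw_sup bitmap, e.g.
 * bw_sup = BW_CAP_80M | BW_CAP_40M | BW_CAP_20M yields CHANNEL_WIDTH_80,
 * while a map containing BW_CAP_160M (without 80+80) yields CHANNEL_WIDTH_160.
 */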
enum rtw_phl_status
phl_sw_cap_init(struct rtw_phl_com_t* phl_com)
{
#ifdef CONFIG_LOAD_PHY_PARA_FROM_FILE
struct phy_sw_cap_t *phy_sw_cap = NULL;
u8 idx=0;
for(idx=0; idx < 2 ; idx++)
{
phy_sw_cap = &phl_com->phy_sw_cap[idx];
_phl_sw_cap_para_init(phl_com, &phy_sw_cap->mac_reg_info);
_phl_sw_cap_para_init(phl_com, &phy_sw_cap->bb_phy_reg_info);
_phl_sw_cap_para_init(phl_com, &phy_sw_cap->bb_phy_reg_mp_info);
_phl_sw_cap_para_init(phl_com, &phy_sw_cap->bb_phy_reg_gain_info);
_phl_sw_cap_para_init(phl_com, &phy_sw_cap->rf_radio_a_info);
_phl_sw_cap_para_init(phl_com, &phy_sw_cap->rf_radio_b_info);
_phl_sw_cap_para_init(phl_com, &phy_sw_cap->rf_txpwr_byrate_info);
_phl_sw_cap_para_init(phl_com, &phy_sw_cap->rf_txpwrtrack_info);
_phl_pwrlmt_para_init(phl_com, &phy_sw_cap->rf_txpwrlmt_info);
_phl_pwrlmt_para_init(phl_com, &phy_sw_cap->rf_txpwrlmt_ru_info);
phy_sw_cap->bfreed_para = false;
}
phl_com->dev_sw_cap.bfree_para_info = false; /* Default keep Phy file param info*/
#endif
phl_com->dev_sw_cap.fw_cap.fw_src = RTW_FW_SRC_INTNAL;
phl_com->dev_sw_cap.btc_mode = BTC_MODE_NORMAL;
phl_com->dev_sw_cap.bypass_rfe_chk = false;
phl_com->dev_sw_cap.rf_board_opt = PHL_UNDEFINED_SW_CAP;
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_sw_cap_deinit(struct rtw_phl_com_t* phl_com)
{
#ifdef CONFIG_LOAD_PHY_PARA_FROM_FILE
struct phy_sw_cap_t *phy_sw_cap = NULL;
u8 idx=0;
for (idx = 0; idx < 2; idx++) {
phy_sw_cap = &phl_com->phy_sw_cap[idx];
if (phy_sw_cap->bfreed_para == true) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "already bfreed para_info->para_data\n");
return RTW_PHL_STATUS_SUCCESS;
}
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "To free para_info->para_data phy %d\n", idx);
_phl_sw_cap_para_free(phl_com, &phy_sw_cap->mac_reg_info);
_phl_sw_cap_para_free(phl_com, &phy_sw_cap->bb_phy_reg_info);
_phl_sw_cap_para_free(phl_com, &phy_sw_cap->bb_phy_reg_mp_info);
_phl_sw_cap_para_free(phl_com, &phy_sw_cap->bb_phy_reg_gain_info);
_phl_sw_cap_para_free(phl_com, &phy_sw_cap->rf_radio_a_info);
_phl_sw_cap_para_free(phl_com, &phy_sw_cap->rf_radio_b_info);
_phl_sw_cap_para_free(phl_com, &phy_sw_cap->rf_txpwr_byrate_info);
_phl_sw_cap_para_free(phl_com, &phy_sw_cap->rf_txpwrtrack_info);
_phl_pwrlmt_para_free(phl_com, &phy_sw_cap->rf_txpwrlmt_info);
_phl_pwrlmt_para_free(phl_com, &phy_sw_cap->rf_txpwrlmt_ru_info);
phy_sw_cap->bfreed_para = true;
}
#endif
return RTW_PHL_STATUS_SUCCESS;
}
void rtw_phl_init_free_para_buf(struct rtw_phl_com_t *phl_com)
{
#ifdef CONFIG_LOAD_PHY_PARA_FROM_FILE
if (phl_com->dev_sw_cap.bfree_para_info == true)
phl_sw_cap_deinit(phl_com);
#endif
}
u16 _phl_sw_role_cap_bf(enum role_type rtype)
{
u16 def_bf_cap = 0;
if (PHL_RTYPE_AP == rtype) {
/* AP mode : no MU BFee */
def_bf_cap = (HW_CAP_BFEE_HT_SU | HW_CAP_BFER_HT_SU |
HW_CAP_BFEE_VHT_SU | HW_CAP_BFER_VHT_SU |
HW_CAP_BFER_VHT_MU |
HW_CAP_BFEE_HE_SU | HW_CAP_BFER_HE_SU |
HW_CAP_BFER_HE_MU |
HW_CAP_HE_NON_TB_CQI | HW_CAP_HE_TB_CQI);
} else if (PHL_RTYPE_STATION == rtype) {
/* STA mode : no MU BFer */
def_bf_cap = (HW_CAP_BFEE_HT_SU | HW_CAP_BFER_HT_SU |
HW_CAP_BFEE_VHT_SU | HW_CAP_BFER_VHT_SU |
HW_CAP_BFEE_VHT_MU |
HW_CAP_BFEE_HE_SU | HW_CAP_BFER_HE_SU |
HW_CAP_BFEE_HE_MU |
HW_CAP_HE_NON_TB_CQI | HW_CAP_HE_TB_CQI);
} else {
def_bf_cap = (HW_CAP_BFEE_HT_SU | HW_CAP_BFER_HT_SU |
HW_CAP_BFEE_VHT_SU | HW_CAP_BFER_VHT_SU |
HW_CAP_BFEE_VHT_MU | HW_CAP_BFER_VHT_MU |
HW_CAP_BFEE_HE_SU | HW_CAP_BFER_HE_SU |
HW_CAP_BFEE_HE_MU | HW_CAP_BFER_HE_MU |
HW_CAP_HE_NON_TB_CQI | HW_CAP_HE_TB_CQI);
}
return def_bf_cap;
}
static void _phl_init_proto_bf_cap(struct phl_info_t *phl_info,
u8 hw_band, enum role_type rtype, struct protocol_cap_t *role_cap)
{
#ifdef RTW_WKARD_PHY_CAP
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct role_sw_cap_t *sw_role_cap = &phl_com->role_sw_cap;
struct protocol_cap_t proto_cap = {0};
u16 bfcap = sw_role_cap->bf_cap;
/* First : compare and get the bf sw_proto_cap and hw_proto_cap .*/
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_get_bf_proto_cap(
phl_com,
phl_info->hal,
hw_band,
&proto_cap)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_,
"%s : Get SW/HW BF Cap FAIL, disable all of the BF functions.\n", __func__);
}
/* Second : filter bf cap with 802.11 spec */
bfcap &= _phl_sw_role_cap_bf(rtype);
/* Final : Compare with sw_role_cap->bf_cap to judge the final wrole's BF CAP. */
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s : sw_role_cap->bf_cap = 0x%x \n",
__func__, sw_role_cap->bf_cap);
if (!(bfcap & HW_CAP_BFEE_HT_SU) &&
(proto_cap.ht_su_bfme)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HT SU BFEE by sw_role_cap.\n");
role_cap->ht_su_bfme = 0;
} else {
role_cap->ht_su_bfme = proto_cap.ht_su_bfme;
}
if (!(bfcap & HW_CAP_BFER_HT_SU) &&
(proto_cap.ht_su_bfmr)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HT SU BFER by sw_role_cap.\n");
role_cap->ht_su_bfmr = 0;
} else {
role_cap->ht_su_bfmr = proto_cap.ht_su_bfmr;
}
if (!(bfcap & HW_CAP_BFEE_VHT_SU) &&
(proto_cap.vht_su_bfme)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable VHT SU BFEE by sw_role_cap.\n");
role_cap->vht_su_bfme = 0;
} else {
role_cap->vht_su_bfme = proto_cap.vht_su_bfme;
}
if (!(bfcap & HW_CAP_BFER_VHT_SU) &&
(proto_cap.vht_su_bfmr)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable VHT SU BFER by sw_role_cap.\n");
role_cap->vht_su_bfmr = 0;
} else {
role_cap->vht_su_bfmr = proto_cap.vht_su_bfmr;
}
if (!(bfcap & HW_CAP_BFEE_VHT_MU) &&
(proto_cap.vht_mu_bfme)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable VHT MU BFEE by sw_role_cap.\n");
role_cap->vht_mu_bfme = 0;
} else {
role_cap->vht_mu_bfme = proto_cap.vht_mu_bfme;
}
if (!(bfcap & HW_CAP_BFER_VHT_MU) &&
(proto_cap.vht_mu_bfmr)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable VHT MU BFER by sw_role_cap.\n");
role_cap->vht_mu_bfmr = 0;
} else {
role_cap->vht_mu_bfmr = proto_cap.vht_mu_bfmr;
}
if (!(bfcap & HW_CAP_BFEE_HE_SU) &&
(proto_cap.he_su_bfme)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE SU BFEE by sw_role_cap.\n");
role_cap->he_su_bfme = 0;
} else {
role_cap->he_su_bfme = proto_cap.he_su_bfme;
}
if (!(bfcap & HW_CAP_BFER_HE_SU) &&
(proto_cap.he_su_bfmr)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE SU BFER by sw_role_cap.\n");
role_cap->he_su_bfmr = 0;
} else {
role_cap->he_su_bfmr = proto_cap.he_su_bfmr;
}
if (!(bfcap & HW_CAP_BFEE_HE_MU) &&
(proto_cap.he_mu_bfme)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE MU BFEE by sw_role_cap.\n");
role_cap->he_mu_bfme = 0;
} else {
role_cap->he_mu_bfme = proto_cap.he_mu_bfme;
}
if (!(bfcap & HW_CAP_BFER_HE_MU) &&
(proto_cap.he_mu_bfmr)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE MU BFER by sw_role_cap.\n");
role_cap->he_mu_bfmr = 0;
} else {
role_cap->he_mu_bfmr = proto_cap.he_mu_bfmr;
}
if (!(bfcap & HW_CAP_HE_NON_TB_CQI) &&
(proto_cap.non_trig_cqi_fb)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE NON-TB CQI_FB by sw_role_cap.\n");
role_cap->non_trig_cqi_fb = 0;
} else {
role_cap->non_trig_cqi_fb = proto_cap.non_trig_cqi_fb;
}
if (!(bfcap & HW_CAP_HE_TB_CQI) &&
(proto_cap.trig_cqi_fb)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE TB CQI_FB by sw_role_cap.\n");
role_cap->trig_cqi_fb = 0;
} else {
role_cap->trig_cqi_fb = proto_cap.trig_cqi_fb;
}
#endif
}
static void _phl_init_proto_stbc_cap(struct phl_info_t *phl_info,
u8 hw_band, struct protocol_cap_t *proto_role_cap)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct role_sw_cap_t *sw_role_cap = &phl_com->role_sw_cap;
struct protocol_cap_t proto_cap = {0};
/* First : compare and get the stbc sw_proto_cap and hw_proto_cap .*/
if (RTW_HAL_STATUS_SUCCESS != rtw_hal_get_stbc_proto_cap(phl_com,
phl_info->hal,
hw_band,
&proto_cap)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_,
"%s : Get SW/HW STBC proto_cap FAIL, disable all of the STBC functions.\n", __func__);
}
/* Final : Compare with sw_role_cap->stbc_cap to judge the final wrole's STBC CAP. */
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s : sw_role_cap->stbc_cap = 0x%x \n",
__func__, sw_role_cap->stbc_cap);
#ifdef RTW_WKARD_PHY_CAP
proto_role_cap->stbc_tx = 0; /* Removed later */
/* Check sw role cap, if it is not support, set proto_role_cap->xxx to 0 */
if (!(sw_role_cap->stbc_cap & HW_CAP_STBC_HT_TX) &&
(proto_cap.stbc_ht_tx)) {
proto_role_cap->stbc_ht_tx = 0;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HT STBC Tx by sw_role_cap.\n");
} else {
proto_role_cap->stbc_ht_tx = proto_cap.stbc_ht_tx;
}
if (!(sw_role_cap->stbc_cap & HW_CAP_STBC_VHT_TX) &&
(proto_cap.stbc_vht_tx)) {
proto_role_cap->stbc_vht_tx = 0;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable VHT STBC Tx by sw_role_cap.\n");
} else {
proto_role_cap->stbc_vht_tx = proto_cap.stbc_vht_tx;
}
if (!(sw_role_cap->stbc_cap & HW_CAP_STBC_HE_TX) &&
(proto_cap.stbc_he_tx)) {
proto_role_cap->stbc_he_tx = 0;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE STBC Tx by sw_role_cap.\n");
} else {
proto_role_cap->stbc_he_tx = proto_cap.stbc_he_tx;
}
if (!(sw_role_cap->stbc_cap & HW_CAP_STBC_HE_TX_GT_80M) &&
(proto_cap.stbc_tx_greater_80mhz)) {
proto_role_cap->stbc_tx_greater_80mhz = 0;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable STBC Tx (greater than 80M) by sw_role_cap.\n");
} else {
proto_role_cap->stbc_tx_greater_80mhz = proto_cap.stbc_tx_greater_80mhz;
}
if (!(sw_role_cap->stbc_cap & HW_CAP_STBC_HT_RX) &&
(proto_cap.stbc_ht_rx)) {
proto_role_cap->stbc_ht_rx = 0;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HT STBC Rx by sw_role_cap.\n");
} else {
proto_role_cap->stbc_ht_rx = proto_cap.stbc_ht_rx;
}
if (!(sw_role_cap->stbc_cap & HW_CAP_STBC_VHT_RX) &&
(proto_cap.stbc_vht_rx)) {
proto_role_cap->stbc_vht_rx = 0;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable VHT STBC Rx by sw_role_cap.\n");
} else {
proto_role_cap->stbc_vht_rx = proto_cap.stbc_vht_rx;
}
if (!(sw_role_cap->stbc_cap & HW_CAP_STBC_HE_RX) &&
(proto_cap.stbc_he_rx)) {
proto_role_cap->stbc_he_rx = 0;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE STBC Rx by sw_role_cap.\n");
} else {
proto_role_cap->stbc_he_rx = proto_cap.stbc_he_rx;
}
if (!(sw_role_cap->stbc_cap & HW_CAP_STBC_HE_RX_GT_80M) &&
(proto_cap.stbc_rx_greater_80mhz)) {
proto_role_cap->stbc_rx_greater_80mhz = 0;
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "Disable HE STBC Rx (greater than 80M) by sw_role_cap.\n");
} else {
proto_role_cap->stbc_rx_greater_80mhz = proto_cap.stbc_rx_greater_80mhz;
}
#endif
}
static enum rtw_phl_status
_phl_init_protocol_cap(struct phl_info_t *phl_info,
u8 hw_band, enum role_type rtype,
struct protocol_cap_t *proto_role_cap)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
/* TODO: Get protocol cap from sw and hw cap*/
if (rtype == PHL_RTYPE_AP) {
proto_role_cap->num_ampdu = 128;
proto_role_cap->ampdu_density = 0;
proto_role_cap->ampdu_len_exp = 0xff;
proto_role_cap->amsdu_in_ampdu = 1;
proto_role_cap->max_amsdu_len =
phl_com->proto_sw_cap[hw_band].max_amsdu_len;
proto_role_cap->htc_rx = 1;
proto_role_cap->sm_ps = 0;
proto_role_cap->trig_padding = 0;
#ifdef CONFIG_PHL_TWT
proto_role_cap->twt =
phl_com->dev_cap.twt_sup & RTW_PHL_TWT_RSP_SUP;
#else
proto_role_cap->twt = 0;
#endif /* CONFIG_PHL_TWT */
proto_role_cap->all_ack = 1;
proto_role_cap->a_ctrl = 0xe;
proto_role_cap->ops = 1;
proto_role_cap->ht_vht_trig_rx = 0;
proto_role_cap->bsscolor = 0x0E; /* Default BSS Color */
proto_role_cap->edca[RTW_AC_BE].ac = RTW_AC_BE;
proto_role_cap->edca[RTW_AC_BE].param = 0xA42B;
proto_role_cap->edca[RTW_AC_BK].ac = RTW_AC_BK;
proto_role_cap->edca[RTW_AC_BK].param = 0xA549;
proto_role_cap->edca[RTW_AC_VI].ac = RTW_AC_VI;
proto_role_cap->edca[RTW_AC_VI].param = 0x5E4326;
proto_role_cap->edca[RTW_AC_VO].ac = RTW_AC_VO;
proto_role_cap->edca[RTW_AC_VO].param = 0x2F3224;
proto_role_cap->ht_ldpc = 1;
proto_role_cap->vht_ldpc = 1;
proto_role_cap->he_ldpc = 1;
proto_role_cap->sgi_20 = 1;
proto_role_cap->sgi_40 = 1;
proto_role_cap->sgi_80 = 1;
proto_role_cap->sgi_160 = 0;
switch (phl_com->phy_cap[hw_band].rxss) {
default:
break;
case 1:
proto_role_cap->ht_rx_mcs[0] = 0xff;
proto_role_cap->vht_rx_mcs[0] = 0xfe;
proto_role_cap->vht_rx_mcs[1] = 0xff;
proto_role_cap->he_rx_mcs[0] = 0xfe;
proto_role_cap->he_rx_mcs[1] = 0xff;
break;
case 2:
proto_role_cap->ht_rx_mcs[0] = 0xff;
proto_role_cap->ht_rx_mcs[1] = 0xff;
proto_role_cap->vht_rx_mcs[0] = 0xfa;
proto_role_cap->vht_rx_mcs[1] = 0xff;
proto_role_cap->he_rx_mcs[0] = 0xfa;
proto_role_cap->he_rx_mcs[1] = 0xff;
break;
}
switch (phl_com->phy_cap[hw_band].txss) {
default:
break;
case 1:
proto_role_cap->ht_tx_mcs[0] = 0xff;
proto_role_cap->vht_tx_mcs[0] = 0xfe;
proto_role_cap->vht_tx_mcs[1] = 0xff;
proto_role_cap->he_tx_mcs[0] = 0xfe;
proto_role_cap->he_tx_mcs[1] = 0xff;
break;
case 2:
proto_role_cap->ht_tx_mcs[0] = 0xff;
proto_role_cap->ht_tx_mcs[1] = 0xff;
proto_role_cap->vht_tx_mcs[0] = 0xfa;
proto_role_cap->vht_tx_mcs[1] = 0xff;
proto_role_cap->he_tx_mcs[0] = 0xfa;
proto_role_cap->he_tx_mcs[1] = 0xff;
break;
}
proto_role_cap->ltf_gi = 0x3f; // bit-x
proto_role_cap->doppler_tx = 1;
proto_role_cap->doppler_rx = 0;
proto_role_cap->dcm_max_const_tx = 0;
proto_role_cap->dcm_max_nss_tx = 0;
proto_role_cap->dcm_max_const_rx = 3;
proto_role_cap->dcm_max_nss_rx = 0;
proto_role_cap->partial_bw_su_in_mu = 1;
_phl_init_proto_stbc_cap(phl_info, hw_band, proto_role_cap);
_phl_init_proto_bf_cap(phl_info, hw_band, rtype, proto_role_cap);
/* All of the HT/VHT/HE BFee */
if ((1 == proto_role_cap->ht_su_bfme) ||
(1 == proto_role_cap->vht_su_bfme) ||
(1 == proto_role_cap->vht_mu_bfme) ||
(1 == proto_role_cap->he_su_bfme) ||
(1 == proto_role_cap->he_mu_bfme) ||
(1 == proto_role_cap->non_trig_cqi_fb)||
(1 == proto_role_cap->trig_cqi_fb)) {
proto_role_cap->bfme_sts = 3;
proto_role_cap->bfme_sts_greater_80mhz = 0;
proto_role_cap->max_nc = 1;
} else {
proto_role_cap->bfme_sts = 0;
proto_role_cap->bfme_sts_greater_80mhz = 0;
proto_role_cap->max_nc = 0;
}
/* HE BFer */
if ((1 == proto_role_cap->he_su_bfmr) ||
(1 == proto_role_cap->he_mu_bfmr)) {
proto_role_cap->num_snd_dim = 1;
proto_role_cap->num_snd_dim_greater_80mhz = 0;
} else {
proto_role_cap->num_snd_dim = 0;
proto_role_cap->num_snd_dim_greater_80mhz = 0;
}
/* HE BFee */
if ((1 == proto_role_cap->he_su_bfme) ||
(1 == proto_role_cap->he_mu_bfme)) {
proto_role_cap->ng_16_su_fb = 1;
proto_role_cap->ng_16_mu_fb = 1;
proto_role_cap->cb_sz_su_fb = 1;
proto_role_cap->cb_sz_mu_fb = 1;
proto_role_cap->he_rx_ndp_4x32 = 1;
} else {
proto_role_cap->ng_16_su_fb = 0;
proto_role_cap->ng_16_mu_fb = 0;
proto_role_cap->cb_sz_su_fb = 0;
proto_role_cap->cb_sz_mu_fb = 0;
proto_role_cap->he_rx_ndp_4x32 = 0;
}
		/* HE SU BFee or BFer */
if ((1 == proto_role_cap->he_su_bfme) ||
(1 == proto_role_cap->he_su_bfmr)) {
proto_role_cap->trig_su_bfm_fb = 1;
} else {
proto_role_cap->trig_su_bfm_fb = 0;
}
		/* HE MU BFee or BFer */
if ((1 == proto_role_cap->he_mu_bfme) ||
(1 == proto_role_cap->he_mu_bfmr)) {
proto_role_cap->trig_mu_bfm_fb = 1;
} else {
proto_role_cap->trig_mu_bfm_fb = 0;
}
/* HT/VHT BFee */
if ((1 == proto_role_cap->vht_mu_bfme) ||
(1 == proto_role_cap->vht_su_bfme) ||
(1 == proto_role_cap->ht_su_bfme)) {
proto_role_cap->ht_vht_ng = 0; /* vht ng = 1 */
proto_role_cap->ht_vht_cb = 1; /* vht_mu{9,7}/vht_su{6,4}/ht{4,2} */
}
proto_role_cap->partial_bw_su_er = 1;
proto_role_cap->pkt_padding = 2;
proto_role_cap->pwr_bst_factor = 1;
proto_role_cap->dcm_max_ru = 2;
proto_role_cap->long_sigb_symbol = 1;
proto_role_cap->tx_1024q_ru = 0;
proto_role_cap->rx_1024q_ru = 1;
proto_role_cap->fbw_su_using_mu_cmprs_sigb = 1;
proto_role_cap->fbw_su_using_mu_non_cmprs_sigb = 1;
proto_role_cap->nss_tx =
phl_com->phy_cap[hw_band].txss;
proto_role_cap->nss_rx =
phl_com->phy_cap[hw_band].rxss;
} else if (rtype == PHL_RTYPE_STATION) {
proto_role_cap->num_ampdu = 128;
proto_role_cap->ampdu_density = 0;
proto_role_cap->ampdu_len_exp = 0xff;
proto_role_cap->amsdu_in_ampdu = 1;
proto_role_cap->max_amsdu_len =
phl_com->proto_sw_cap[hw_band].max_amsdu_len;
proto_role_cap->htc_rx = 1;
proto_role_cap->sm_ps = 3;
proto_role_cap->trig_padding = 2;
#ifdef CONFIG_PHL_TWT
proto_role_cap->twt =
phl_com->dev_cap.twt_sup & RTW_PHL_TWT_REQ_SUP;
#else
proto_role_cap->twt = 0;
#endif /* CONFIG_PHL_TWT */
proto_role_cap->all_ack = 1;
proto_role_cap->a_ctrl = 0x6;
proto_role_cap->ops = 1;
proto_role_cap->ht_vht_trig_rx = 1;
proto_role_cap->edca[RTW_AC_BE].ac = RTW_AC_BE;
proto_role_cap->edca[RTW_AC_BE].param = 0xA42B;
proto_role_cap->edca[RTW_AC_BK].ac = RTW_AC_BK;
proto_role_cap->edca[RTW_AC_BK].param = 0xA549;
proto_role_cap->edca[RTW_AC_VI].ac = RTW_AC_VI;
proto_role_cap->edca[RTW_AC_VI].param = 0x5E4326;
proto_role_cap->edca[RTW_AC_VO].ac = RTW_AC_VO;
proto_role_cap->edca[RTW_AC_VO].param = 0x2F3224;
proto_role_cap->ht_ldpc = 1;
proto_role_cap->vht_ldpc = 1;
proto_role_cap->he_ldpc = 1;
proto_role_cap->sgi_20 = 1;
proto_role_cap->sgi_40 = 1;
proto_role_cap->sgi_80 = 1;
proto_role_cap->sgi_160 = 0;
switch (phl_com->phy_cap[hw_band].rxss) {
default:
break;
case 1:
proto_role_cap->ht_rx_mcs[0] = 0xff;
proto_role_cap->vht_rx_mcs[0] = 0xfe;
proto_role_cap->vht_rx_mcs[1] = 0xff;
proto_role_cap->he_rx_mcs[0] = 0xfe;
proto_role_cap->he_rx_mcs[1] = 0xff;
break;
case 2:
proto_role_cap->ht_rx_mcs[0] = 0xff;
proto_role_cap->ht_rx_mcs[1] = 0xff;
proto_role_cap->vht_rx_mcs[0] = 0xfa;
proto_role_cap->vht_rx_mcs[1] = 0xff;
proto_role_cap->he_rx_mcs[0] = 0xfa;
proto_role_cap->he_rx_mcs[1] = 0xff;
break;
}
switch (phl_com->phy_cap[hw_band].txss) {
default:
break;
case 1:
proto_role_cap->ht_tx_mcs[0] = 0xff;
proto_role_cap->vht_tx_mcs[0] = 0xfe;
proto_role_cap->vht_tx_mcs[1] = 0xff;
proto_role_cap->he_tx_mcs[0] = 0xfe;
proto_role_cap->he_tx_mcs[1] = 0xff;
break;
case 2:
proto_role_cap->ht_tx_mcs[0] = 0xff;
proto_role_cap->ht_tx_mcs[1] = 0xff;
proto_role_cap->vht_tx_mcs[0] = 0xfa;
proto_role_cap->vht_tx_mcs[1] = 0xff;
proto_role_cap->he_tx_mcs[0] = 0xfa;
proto_role_cap->he_tx_mcs[1] = 0xff;
break;
}
proto_role_cap->ltf_gi = 0x3f; // bit-x
proto_role_cap->doppler_tx = 1;
proto_role_cap->doppler_rx = 0;
proto_role_cap->dcm_max_const_tx = 3;
proto_role_cap->dcm_max_nss_tx = 1;
proto_role_cap->dcm_max_const_rx = 3;
proto_role_cap->dcm_max_nss_rx = 0;
_phl_init_proto_stbc_cap(phl_info, hw_band, proto_role_cap);
_phl_init_proto_bf_cap(phl_info, hw_band, rtype, proto_role_cap);
/* All of the HT/VHT/HE BFee */
if ((1 == proto_role_cap->ht_su_bfme) ||
(1 == proto_role_cap->vht_su_bfme) ||
(1 == proto_role_cap->vht_mu_bfme) ||
(1 == proto_role_cap->he_su_bfme) ||
(1 == proto_role_cap->he_mu_bfme) ||
(1 == proto_role_cap->non_trig_cqi_fb) ||
(1 == proto_role_cap->trig_cqi_fb)) {
proto_role_cap->bfme_sts = 3;
proto_role_cap->bfme_sts_greater_80mhz = 0;
proto_role_cap->max_nc = 1;
} else {
proto_role_cap->bfme_sts = 0;
proto_role_cap->bfme_sts_greater_80mhz = 0;
proto_role_cap->max_nc = 0;
}
/* HE BFer */
if ((1 == proto_role_cap->he_su_bfmr) ||
(1 == proto_role_cap->he_mu_bfmr)) {
proto_role_cap->num_snd_dim = 1;
proto_role_cap->num_snd_dim_greater_80mhz = 0;
} else {
proto_role_cap->num_snd_dim = 0;
proto_role_cap->num_snd_dim_greater_80mhz = 0;
}
/* HE BFee */
if ((1 == proto_role_cap->he_su_bfme) ||
(1 == proto_role_cap->he_mu_bfme)) {
#ifdef RTW_WKARD_BFEE_DISABLE_NG16
proto_role_cap->ng_16_su_fb = 0;
proto_role_cap->ng_16_mu_fb = 0;
#else
proto_role_cap->ng_16_su_fb = 1;
proto_role_cap->ng_16_mu_fb = 1;
#endif
proto_role_cap->cb_sz_su_fb = 1;
proto_role_cap->cb_sz_mu_fb = 1;
proto_role_cap->he_rx_ndp_4x32 = 1;
} else {
proto_role_cap->ng_16_su_fb = 0;
proto_role_cap->ng_16_mu_fb = 0;
proto_role_cap->cb_sz_su_fb = 0;
proto_role_cap->cb_sz_mu_fb = 0;
proto_role_cap->he_rx_ndp_4x32 = 0;
}
		/* HE SU BFee or BFer */
if ((1 == proto_role_cap->he_su_bfme) ||
(1 == proto_role_cap->he_su_bfmr)) {
proto_role_cap->trig_su_bfm_fb = 1;
} else {
proto_role_cap->trig_su_bfm_fb = 0;
}
		/* HE MU BFee or BFer */
if ((1 == proto_role_cap->he_mu_bfme) ||
(1 == proto_role_cap->he_mu_bfmr)) {
proto_role_cap->trig_mu_bfm_fb = 1;
} else {
proto_role_cap->trig_mu_bfm_fb = 0;
}
/* HT/VHT BFee */
if ((1 == proto_role_cap->vht_mu_bfme) ||
(1 == proto_role_cap->vht_su_bfme) ||
(1 == proto_role_cap->ht_su_bfme)) {
proto_role_cap->ht_vht_ng = 0; /* vht ng = 1 */
proto_role_cap->ht_vht_cb = 1; /* vht_mu{9,7}/vht_su{6,4}/ht{4,2} */
}
proto_role_cap->partial_bw_su_in_mu = 0;
proto_role_cap->partial_bw_su_er = 1;
proto_role_cap->pkt_padding = 2;
proto_role_cap->pwr_bst_factor = 1;
proto_role_cap->dcm_max_ru = 2;
proto_role_cap->long_sigb_symbol = 1;
proto_role_cap->tx_1024q_ru = 1;
proto_role_cap->rx_1024q_ru = 1;
proto_role_cap->fbw_su_using_mu_cmprs_sigb = 1;
proto_role_cap->fbw_su_using_mu_non_cmprs_sigb = 1;
proto_role_cap->nss_tx =
phl_com->phy_cap[hw_band].txss;
proto_role_cap->nss_rx =
phl_com->phy_cap[hw_band].rxss;
}
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_init_protocol_cap(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wifi_role)
{
enum rtw_phl_status ret = RTW_PHL_STATUS_SUCCESS;
struct protocol_cap_t *role_proto_cap = &wifi_role->proto_role_cap;
_os_mem_set(phl_to_drvpriv(phl_info),
role_proto_cap, 0, sizeof(struct protocol_cap_t));
ret = _phl_init_protocol_cap(phl_info, wifi_role->hw_band, wifi_role->type,
role_proto_cap);
if (ret == RTW_PHL_STATUS_FAILURE)
PHL_ERR("wrole:%d - %s failed\n", wifi_role->id, __func__);
return ret;
}
static enum rtw_phl_status
_phl_init_role_cap(struct phl_info_t *phl_info,
u8 hw_band, struct role_cap_t *role_cap)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
#ifdef RTW_WKARD_PHY_CAP
role_cap->wmode = phl_com->phy_cap[hw_band].proto_sup;
role_cap->bw = _phl_sw_cap_get_hi_bw(&phl_com->phy_cap[hw_band]);
role_cap->rty_lmt = 0xFF; /* default follow CR */
role_cap->rty_lmt_rts = 0xFF; /* default follow CR */
role_cap->tx_htc = 1;
role_cap->tx_sgi = 1;
role_cap->tx_ht_ldpc = 1;
role_cap->tx_vht_ldpc = 1;
role_cap->tx_he_ldpc = 1;
role_cap->tx_ht_stbc = 1;
role_cap->tx_vht_stbc = 1;
role_cap->tx_he_stbc = 1;
#endif
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_init_role_cap(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wifi_role)
{
struct role_cap_t *role_cap = &wifi_role->cap;
enum rtw_phl_status ret = RTW_PHL_STATUS_SUCCESS;
_os_mem_set(phl_to_drvpriv(phl_info),
role_cap, 0, sizeof(struct role_cap_t));
ret = _phl_init_role_cap(phl_info, wifi_role->hw_band, role_cap);
ret = phl_custom_init_role_cap(phl_info, wifi_role->hw_band, role_cap);
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
rtw_phl_get_dft_proto_cap(void *phl, u8 hw_band, enum role_type rtype,
struct protocol_cap_t *role_proto_cap)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
_os_mem_set(phl_to_drvpriv(phl_info),
role_proto_cap, 0, sizeof(struct protocol_cap_t));
return _phl_init_protocol_cap(phl_info, hw_band, rtype,
role_proto_cap);
}
enum rtw_phl_status
rtw_phl_get_dft_cap(void *phl, u8 hw_band, struct role_cap_t *role_cap)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
_os_mem_set(phl_to_drvpriv(phl_info),
role_cap, 0, sizeof(struct role_cap_t));
return _phl_init_role_cap(phl_info, hw_band, role_cap);
}
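/*
 * Illustrative sketch (not part of the original driver): fetching the default
 * protocol and role capabilities that would be applied to a station-mode role
 * on HW band 0, e.g. for a core layer that wants to inspect them before
 * creating the role. The helper name below is hypothetical and is not
 * referenced by the driver.
 */
static void phl_example_dump_dft_caps(void *phl)
{
	struct protocol_cap_t proto_cap = {0};
	struct role_cap_t role_cap = {0};

	if (rtw_phl_get_dft_proto_cap(phl, 0, PHL_RTYPE_STATION, &proto_cap) !=
	    RTW_PHL_STATUS_SUCCESS)
		return;
	if (rtw_phl_get_dft_cap(phl, 0, &role_cap) != RTW_PHL_STATUS_SUCCESS)
		return;
	PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "dft nss tx/rx = %d/%d\n",
		  proto_cap.nss_tx, proto_cap.nss_rx);
}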
void rtw_phl_final_cap_decision(void * phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
#ifdef CONFIG_PHL_DFS
phl_com->dfs_info.region_domain = DFS_REGD_ETSI;
#endif
rtw_hal_final_cap_decision(phl_com, phl_info->hal);
}
| 2301_81045437/rtl8852be | phl/phl_sw_cap.c | C | agpl-3.0 | 27,356 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_SW_CAP_H_
#define _PHL_SW_CAP_H_
enum rtw_phl_status
phl_sw_cap_init(struct rtw_phl_com_t* phl_com);
enum rtw_phl_status
phl_sw_cap_deinit(struct rtw_phl_com_t* phl_com);
enum rtw_phl_status
phl_init_protocol_cap(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wifi_role);
enum rtw_phl_status
phl_init_role_cap(struct phl_info_t *phl_info,
struct rtw_wifi_role_t *wifi_role);
void rtw_phl_init_free_para_buf(struct rtw_phl_com_t *phl_com);
#endif /*_PHL_SW_CAP_H_*/
| 2301_81045437/rtl8852be | phl/phl_sw_cap.h | C | agpl-3.0 | 1,149 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _TEST_MODULE_H_
#define _TEST_MODULE_H_
#ifdef CONFIG_PHL_TEST_SUITE
u8 phl_test_module_init(struct phl_info_t *phl_info);
void phl_test_module_deinit(struct rtw_phl_com_t* phl_com);
u8 phl_test_module_start(struct rtw_phl_com_t* phl_com);
void phl_test_module_stop(struct rtw_phl_com_t* phl_com);
/* phl test mp command */
enum rtw_phl_status phl_test_mp_alloc(struct phl_info_t *phl_info, void *hal, void **mp);
void phl_test_mp_free(void **mp);
void phl_test_mp_init(void *mp);
void phl_test_mp_deinit(void *mp);
void phl_test_mp_start(void *mp, u8 tm_mode);
void phl_test_mp_stop(void *mp, u8 tm_mode);
void phl_test_mp_cmd_process(void *mp, void *buf, u32 buf_len, u8 submdid);
void phl_test_mp_get_rpt(void *mp, void *buf, u32 buf_len);
/* phl test verify command */
enum rtw_phl_status phl_test_verify_alloc(struct phl_info_t *phl_info, void *hal, void **ctx);
void phl_test_verify_free(void **ctx);
void phl_test_verify_init(void *ctx);
void phl_test_verify_deinit(void *ctx);
void phl_test_verify_start(void *ctx);
void phl_test_verify_stop(void *ctx);
void phl_test_verify_cmd_process(void *ctx, void *buf, u32 buf_len, u8 submdid);
void phl_test_verify_get_rpt(void *ctx, void *buf, u32 buf_len);
#else
#define phl_test_module_init(phl_info) true
#define phl_test_module_deinit(phl_com)
#define phl_test_module_start(phl_com) true
#define phl_test_module_stop(phl_com)
/* phl test mp command */
#define phl_test_mp_alloc(phl_info, hal, mp) RTW_PHL_STATUS_SUCCESS
#define phl_test_mp_free(mp)
#define phl_test_mp_init(mp)
#define phl_test_mp_deinit(mp)
#define phl_test_mp_start(mp, tm_mode)
#define phl_test_mp_stop(mp, tm_mode)
#define phl_test_mp_cmd_process(mp, buf, buf_len, submdid)
#define phl_test_mp_get_rpt(mp, buf, buf_len)
/* phl test verify command */
#define phl_test_verify_alloc(phl_info, hal, ctx)
#define phl_test_verify_free(ctx)
#define phl_test_verify_init(ctx)
#define phl_test_verify_deinit(ctx)
#define phl_test_verify_start(ctx)
#define phl_test_verify_stop(ctx)
#define phl_test_verify_cmd_process(ctx, buf, buf_len, submdid)
#define phl_test_verify_get_rpt(ctx, buf, buf_len)
#endif /*CONFIG_PHL_TEST_SUITE*/
#endif /* _TEST_MODULE_H_ */
| 2301_81045437/rtl8852be | phl/phl_test.h | C | agpl-3.0 | 2,839 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _TEST_MODULE_DEF_H_
#define _TEST_MODULE_DEF_H_
#ifdef CONFIG_PHL_TEST_SUITE
#define TEST_NAME_LEN 32
#define TEST_RPT_RSN_LEN 32
#define TEST_LVL_LOW_TO 1000
#define TEST_LVL_NORMAL_TO 50
#define TEST_LVL_HIGH_TO 20
/* Add customized BP types in the following enum,
   and handle them in the corresponding bp handler */
enum TEST_BP_INFO_TYPE{
BP_INFO_TYPE_NONE = 0,
BP_INFO_TYPE_WAIT_BEACON_JOIN = 0x1,
BP_INFO_TYPE_SEND_AUTH_ODD = 0x2,
BP_INFO_TYPE_SEND_ASOC_REQ = 0x3,
BP_INFO_TYPE_SEND_DISASSOC = 0x4,
BP_INFO_TYPE_FILL_DISASSOC_RSN = 0x5,
BP_INFO_TYPE_SEND_PROBE_REQ = 0x6,
BP_INFO_TYPE_MP_CMD_EVENT = 0x7,
BP_INFO_TYPE_RX_TEST_WPRPT = 0x8,
BP_INFO_TYPE_RX_TEST_PATTERN = 0x9,
BP_INFO_TYPE_MP_RX_PHYSTS = 0xA,
BP_INFO_TYPE_TX_4_WAY = 0xB,
BP_INFO_TYPE_RX_4_WAY = 0xC,
BP_INFO_TYPE_MAX
};
enum TEST_RUN_LVL{
TEST_LVL_NONE = 0,
TEST_LVL_LOW,
TEST_LVL_NORMAL,
TEST_LVL_HIGH,
TEST_LVL_MAX
};
enum TEST_BP_RETURN_TYPE{
BP_RET_SKIP_SECTION = 0,
BP_RET_RUN_ORIGIN_SEC,
BP_RET_LEAVE_FUNC,
BP_RET_MAX
};
enum TEST_SUB_MODULE {
TEST_SUB_MODULE_MP = 0,
TEST_SUB_MODULE_FPGA = 1,
TEST_SUB_MODULE_VERIFY = 2,
TEST_SUB_MODULE_TOOL = 3,
TEST_SUB_MODULE_TRX = 4,
TEST_SUB_MODULE_UNKNOWN,
};
enum TEST_MODULE_MODE_TYPE {
UNIT_TEST_MODE = 0,
INTGR_TEST_MODE = 1,
FUNC_TEST_MODE = 2
};
struct test_bp_info{
enum TEST_BP_INFO_TYPE type;
u32 len;
void* ptr;
};
/**
* test_obj_ctrl_interface - basic test control methods for generic management.
* @start_test: test entry, initiate & run a test
* @is_test_end: return true when test ends.
* NOTE: Do not use evt/lock inside this method for sync.
* @is_test_pass: return true when test passed.
* @get_fail_rsn: if test fails, construct a reasonable string as fail description,
* not just a status code.
* @bp_handler: handle break point which is currently being hit,
* use rtw_phl_test_setup_bp to add new break point in source code
* and add customized BP type in TEST_BP_INFO_TYPE for recognition.
*/
struct test_obj_ctrl_interface{
u8 (*start_test)(void *priv);
u8 (*is_test_end)(void *priv);
u8 (*is_test_pass)(void *priv);
u8 (*get_fail_rsn)(void *priv,char* rsn, u32 max_len);
u8 (*bp_handler)(void *priv, struct test_bp_info* bp_info);
};
struct test_object {
_os_list list;
void* priv;
enum TEST_RUN_LVL run_lvl;
char name[TEST_NAME_LEN];
struct test_obj_ctrl_interface ctrl;
	s32 total_time_ms; // optional; set 0 to use the default, see TEST_LVL_LOW_TO
};
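/*
 * Illustrative usage sketch (not compiled): how a caller might wire up a
 * test object. The "example_*" names are hypothetical, and the objid /
 * test_mode values are only guesses for the sake of the example; the
 * registration call is rtw_phl_test_add_new_test_obj() declared below.
 *
 *	static u8 example_start(void *priv) { return true; }
 *	static u8 example_is_end(void *priv) { return true; }
 *	static u8 example_is_pass(void *priv) { return true; }
 *	static u8 example_fail_rsn(void *priv, char *rsn, u32 max_len) { return true; }
 *	static u8 example_bp(void *priv, struct test_bp_info *bp_info) { return BP_RET_RUN_ORIGIN_SEC; }
 *
 *	static struct test_obj_ctrl_interface example_ctrl = {
 *		.start_test = example_start,
 *		.is_test_end = example_is_end,
 *		.is_test_pass = example_is_pass,
 *		.get_fail_rsn = example_fail_rsn,
 *		.bp_handler = example_bp,
 *	};
 *
 *	rtw_phl_test_add_new_test_obj(phl_com, "example", NULL, TEST_LVL_NORMAL,
 *				      &example_ctrl, 0, TEST_SUB_MODULE_MP,
 *				      UNIT_TEST_MODE);
 */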
struct test_rpt {
char name[TEST_NAME_LEN];
u8 status;
char rsn[TEST_RPT_RSN_LEN];
u32 total_time; // in ms
};
void rtw_phl_test_submodule_init(struct rtw_phl_com_t* phl_com, void *buf);
void rtw_phl_test_submodule_deinit(struct rtw_phl_com_t* phl_com, void *buf);
void rtw_phl_test_submodule_cmd_process(struct rtw_phl_com_t* phl_com, void *buf, u32 buf_len);
void rtw_phl_test_submodule_get_rpt(struct rtw_phl_com_t* phl_com, void *buf, u32 buf_len);
u8 rtw_phl_test_add_new_test_obj(struct rtw_phl_com_t* phl_com,
char *name,
void* priv,
enum TEST_RUN_LVL lvl,
struct test_obj_ctrl_interface* ctrl_intf,
s32 total_time_ms,
u8 objid,
u8 test_mode);
u8 rtw_phl_test_setup_bp(struct rtw_phl_com_t* phl_com,
struct test_bp_info* bp_info,
u8 submdid);
u8 rtw_phl_test_is_test_complete(struct rtw_phl_com_t* phl_com);
u8 rtw_phl_test_get_rpt(struct rtw_phl_com_t* phl_com, u8* buf, u32 len);
u8 rtw_phl_test_set_max_run_time(struct rtw_phl_com_t* phl_com, enum TEST_RUN_LVL lvl, u32 timeout_ms);
enum rtw_phl_status rtw_phl_reset(void *phl);
#else
#define rtw_phl_test_submodule_init(phl_com, buf)
#define rtw_phl_test_submodule_deinit(phl_com, buf)
#define rtw_phl_test_submodule_cmd_process(phl_com, buf, buf_len)
#define rtw_phl_test_submodule_get_rpt(phl_com, buf, buf_len)
#define rtw_phl_test_add_new_test_obj(phl_com, name, priv, lvl, ctrl_intf, total_time_ms, objid, test_mode) true
#define rtw_phl_test_setup_bp(phl_com, bp_info, submdid) true
#define rtw_phl_test_is_test_complete(phl_com) true
#define rtw_phl_test_get_rpt(phl_com, buf, len) true
#define rtw_phl_test_set_max_run_time(phl_com, lvl, timeout_ms) true
#endif /*CONFIG_PHL_TEST_SUITE*/
#endif /* _TEST_MODULE_DEF_H_ */
| 2301_81045437/rtl8852be | phl/phl_test_def.h | C | agpl-3.0 | 5,098 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_THERMAL_C_
#include "phl_headers.h"
#ifdef CONFIG_PHL_THERMAL_PROTECT
static void _phl_thermal_protect_disable_all_txop(
struct phl_info_t *phl_info,
bool disable)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct rtw_wifi_role_t *wrole = NULL;
struct rtw_phl_stainfo_t *sta = NULL;
struct rtw_edca_param edca = {0};
u8 i = 0;
for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
if (mr_ctl->role_map & BIT(i)) {
wrole = phl_get_wrole_by_ridx(phl_info, i);
if(wrole){
if(wrole->mstate == MLME_LINKED)
break;
}
wrole = NULL;
continue;
}
}
if(wrole == NULL)
return;
sta = rtw_phl_get_stainfo_self(phl_info, wrole);
if(sta == NULL)
return;
for(i = 0; i < 4;i++){
edca.ac = i;
edca.param = sta->asoc_cap.edca[edca.ac].param;
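		/* Assumption: the TXOP limit occupies the upper 16 bits of the
		 * packed EDCA parameter, so masking keeps AIFS/ECW and zeroes
		 * the TXOP limit for this AC. */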
if(disable)
edca.param &= 0x0000FFFF;
if(rtw_hal_set_edca(phl_info->hal, wrole, edca.ac, edca.param)
!= RTW_HAL_STATUS_SUCCESS)
PHL_ERR("%s Config edca fail\n", __FUNCTION__);
}
}
static void _phl_thermal_protect_reduce_ampdu_num(
struct phl_info_t *phl_info,
u8 ratio)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct mr_ctl_t *mr_ctl = phlcom_to_mr_ctrl(phl_com);
struct rtw_wifi_role_t *wrole = NULL;
struct rtw_phl_stainfo_t *sta = NULL;
u8 i = 0;
for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
if (mr_ctl->role_map & BIT(i)) {
wrole = phl_get_wrole_by_ridx(phl_info, i);
if(wrole){
if(wrole->mstate == MLME_LINKED)
break;
}
wrole = NULL;
continue;
}
}
if(wrole == NULL)
return;
sta = rtw_phl_get_stainfo_self(phl_info, wrole);
if(sta == NULL)
return;
if(ratio != 0){
if(rtw_hal_thermal_protect_cfg_tx_ampdu(phl_info->hal, sta, ratio)
!= RTW_HAL_STATUS_SUCCESS)
PHL_ERR("%s Thermal protect cfg tx ampdu fail\n", __FUNCTION__);
}
else{
if(sta->asoc_cap.num_ampdu_bk != 0){
sta->asoc_cap.num_ampdu = sta->asoc_cap.num_ampdu_bk;
sta->asoc_cap.num_ampdu_bk = 0;
}
if(rtw_hal_cfg_tx_ampdu(phl_info->hal, sta) !=
RTW_HAL_STATUS_SUCCESS)
PHL_ERR("%s Thermal protect restore tx ampdu fail\n", __FUNCTION__);
}
}
void phl_thermal_protect_watchdog(struct phl_info_t *phl_info)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
bool action_changed = false;
if(phl_com->drv_mode != RTW_DRV_MODE_NORMAL &&
phl_com->drv_mode != RTW_DRV_MODE_HIGH_THERMAL)
return;
action_changed = rtw_hal_check_thermal_protect(phl_com, phl_info->hal);
if(action_changed == false)
return;
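	/* Escalation policy as implemented below: LEVEL1 disables TXOP bursting
	 * and trims the TX A-MPDU number to 70%, LEVEL2 trims it further to 50%,
	 * and NONE restores TXOP and the original A-MPDU number. */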
switch (phl_com->thermal_protect_action){
case PHL_THERMAL_PROTECT_ACTION_NONE:
_phl_thermal_protect_disable_all_txop(phl_info, false);
_phl_thermal_protect_reduce_ampdu_num(phl_info, 0);
break;
case PHL_THERMAL_PROTECT_ACTION_LEVEL1:
_phl_thermal_protect_disable_all_txop(phl_info, true);
_phl_thermal_protect_reduce_ampdu_num(phl_info, 70);
break;
case PHL_THERMAL_PROTECT_ACTION_LEVEL2:
_phl_thermal_protect_reduce_ampdu_num(phl_info, 50);
break;
default:
break;
}
}
#endif /* CONFIG_PHL_THERMAL_PROTECT */
void phl_thermal_protect_cfg_tx_duty(
struct phl_info_t *phl_info,
u16 tx_duty_interval,
u8 ratio)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
hal_status = rtw_hal_thermal_protect_cfg_tx_duty(phl_info->hal,
tx_duty_interval,
ratio);
if(hal_status != RTW_HAL_STATUS_SUCCESS)
PHL_ERR("%s Thermal protect cfg tx duty fail\n", __FUNCTION__);
}
void phl_thermal_protect_stop_tx_duty(struct phl_info_t *phl_info)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_SUCCESS;
hal_status = rtw_hal_thermal_protect_stop_tx_duty(phl_info->hal);
if(hal_status != RTW_HAL_STATUS_SUCCESS)
PHL_ERR("%s Thermal protect stop tx duty fail\n", __FUNCTION__);
}
| 2301_81045437/rtl8852be | phl/phl_thermal.c | C | agpl-3.0 | 4,423 |
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_THERMAL_H_
#define _PHL_THERMAL_H_
void phl_thermal_protect_watchdog(struct phl_info_t *phl_info);
void phl_thermal_protect_cfg_tx_duty(
struct phl_info_t *phl_info,
u16 tx_duty_interval,
u8 ratio);
void phl_thermal_protect_stop_tx_duty(struct phl_info_t *phl_info);
#endif /*_PHL_THERMAL_H_*/
| 2301_81045437/rtl8852be | phl/phl_thermal.h | C | agpl-3.0 | 962 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_TRX_DEF_H_
#define __PHL_TRX_DEF_H_
/* core / phl common structure */
#define MAX_PHL_RING_ENTRY_NUM 4096
#define MAX_PHL_RING_CAT_NUM 10 /* 8 tid + 1 mgnt + 1 hiq*/
#define MAX_PHL_RING_RX_PKT_NUM 8192
#define MAX_RX_BUF_SEG_NUM 4
#define _H2CB_CMD_QLEN 32
#define _H2CB_DATA_QLEN 32
#define _H2CB_LONG_DATA_QLEN 200 /* should be refined */
#define MAX_H2C_PKT_NUM (_H2CB_CMD_QLEN + _H2CB_DATA_QLEN + _H2CB_LONG_DATA_QLEN)
#define FWCMD_HDR_LEN 8
#define _WD_BODY_LEN 24
#define H2C_CMD_LEN 64
#define H2C_DATA_LEN 256
#define H2C_LONG_DATA_LEN 2048
#define get_h2c_size_by_range(i) \
((i < _H2CB_CMD_QLEN) ? \
(FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_CMD_LEN) : \
((i < (_H2CB_CMD_QLEN + _H2CB_DATA_QLEN)) ? \
(FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_DATA_LEN) : \
(FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_LONG_DATA_LEN)))
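/*
 * Buffer size per queue index with the settings above (worked example):
 *	i in [0, 31]   -> 8 + 24 + 64   = 96 bytes   (cmd queue)
 *	i in [32, 63]  -> 8 + 24 + 256  = 288 bytes  (data queue)
 *	i in [64, 263] -> 8 + 24 + 2048 = 2080 bytes (long data queue)
 */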
struct rtw_h2c_pkt {
_os_list list;
u8 *vir_head; /* should not reset */
u8 *vir_data;
u8 *vir_end;
u8 *vir_tail;
void *os_rsvd[1];
u8 type;
u32 id; /* h2c id */
u32 buf_len;
u32 data_len;
u32 phy_addr_l;
u32 phy_addr_h;
u8 cache;
u16 host_idx;
u8 h2c_seq; /* h2c seq */
};
/**
* the category of phl ring
*/
enum rtw_phl_ring_cat {
RTW_PHL_RING_CAT_TID0 = 0,
RTW_PHL_RING_CAT_TID1 = 1,
RTW_PHL_RING_CAT_TID2 = 2,
RTW_PHL_RING_CAT_TID3 = 3,
RTW_PHL_RING_CAT_TID4 = 4,
RTW_PHL_RING_CAT_TID5 = 5,
RTW_PHL_RING_CAT_TID6 = 6,
RTW_PHL_RING_CAT_TID7 = 7,
RTW_PHL_RING_CAT_MGNT = 8,
RTW_PHL_RING_CAT_HIQ = 9,
RTW_PHL_RING_CAT_MAX = 0xff
};
/**
* @RTW_PHL_TREQ_TYPE_PHL_UPDATE_TXSC:
 *	this is for the phl tx shortcut entry to update
 * @RTW_PHL_TREQ_TYPE_CORE_TXSC:
 *	this txreq is a shortcut pkt, so it needs a txsc recycle
 * @RTW_PHL_TREQ_TYPE_PHL_ADD_TXSC:
 *	this txreq is newly cached in the core layer and also needs to be
 *	cached in the phl layer
*/
enum rtw_treq_type {
#if defined(CONFIG_CORE_TXSC) || defined(CONFIG_PHL_TXSC)
RTW_PHL_TREQ_TYPE_PHL_UPDATE_TXSC = 0x80,
#endif
RTW_PHL_TREQ_TYPE_NORMAL = 0,
RTW_PHL_TREQ_TYPE_TEST_PATTERN = 1,
#if defined(CONFIG_CORE_TXSC) || defined(CONFIG_PHL_TXSC)
RTW_PHL_TREQ_TYPE_CORE_TXSC = 2,
RTW_PHL_TREQ_TYPE_PHL_ADD_TXSC = 3,
#endif
RTW_PHL_TREQ_TYPE_MAX = 0xFF
};
enum rtw_packet_type {
RTW_PHL_PKT_TYPE_DATA = 0,
RTW_PHL_PKT_TYPE_MGNT = 1,
RTW_PHL_PKT_TYPE_H2C = 2,
RTW_PHL_PKT_TYPE_CTRL = 3,
RTW_PHL_PKT_TYPE_FWDL = 4,
RTW_PHL_PKT_TYPE_MAX = 0xFF
};
/**
* struct rtw_t_mdata_non_dcpu:
 * these settings are only used in non-dcpu mode.
*/
struct rtw_t_mdata_non_dcpu {
u8 tbd;
};
/**
* struct rtw_t_mdata_dcpu:
 * these settings are only used in dcpu mode.
*/
struct rtw_t_mdata_dcpu {
u8 tbd;
};
/**
 * tx packet description
 *
 * @u: the union separating dcpu-mode and non-dcpu-mode specific settings
 * @mac_priv: the mac private structure used only by the HV tool;
 *	a normal driver won't allocate memory for this pointer.
*/
struct rtw_t_meta_data {
/* basic */
u8 *ta;
u8 *ra;
u8 da[6];
u8 sa[6];
u8 to_ds;
u8 from_ds;
u8 band; /*0 or 1*/
u8 wmm; /*0 or 1*/
enum rtw_packet_type type;
u8 tid;
u8 bc;
u8 mc;
u16 pktlen; /* MAC header length + frame body length */
u16 macid;
u8 hal_port;
/* sequence */
u8 hw_seq_mode;
u8 hw_ssn_sel;
u16 sw_seq;
/* hdr conversion & hw amsdu */
u8 smh_en;
u8 hw_amsdu;
u8 hdr_len;
u8 wp_offset;
u8 shcut_camid;
u8 upd_wlan_hdr;
u8 reuse_start_num;
u8 reuse_size;
/* sec */
u8 hw_sec_iv;
u8 sw_sec_iv;
u8 sec_keyid;
u8 sec_cam_idx;
u8 sec_hw_enc;
u8 sec_type;
u8 force_key_en;
u8 iv[6];
/* dma */
u8 dma_ch;
u8 wd_page_size;
u8 wdinfo_en;
u8 addr_info_num;
u8 usb_pkt_ofst;
u8 usb_txagg_num;
/* ampdu */
u8 ampdu_en;
u8 max_agg_num;
u8 bk;
u8 ampdu_density;
/* rate */
u8 data_bw_er;
u8 f_ldpc;
u8 f_stbc;
u8 f_dcm;
u8 f_er;
u16 f_rate;
u8 f_gi_ltf;
u8 f_bw;
u8 userate_sel;
/* a ctrl */
u8 a_ctrl_bqr;
u8 a_ctrl_uph;
u8 a_ctrl_bsr;
u8 a_ctrl_cas;
/* tx cnt & rty rate */
u8 dis_rts_rate_fb;
u8 dis_data_rate_fb;
u16 data_rty_lowest_rate;
u8 data_tx_cnt_lmt;
u8 data_tx_cnt_lmt_en;
/* protection */
u8 rts_en;
u8 cts2self;
u8 rts_cca_mode;
u8 hw_rts_en;
/* misc */
u8 mbssid;
u8 nav_use_hdr;
u8 ack_ch_info;
u8 life_time_sel;
u8 no_ack;
u8 ndpa;
u8 snd_pkt_sel;
u8 sifs_tx;
u8 rtt_en;
u8 spe_rpt;
u8 raw;
u8 sw_define;
union {
struct rtw_t_mdata_non_dcpu non_dcpu;
struct rtw_t_mdata_dcpu dcpu;
} u;
void *mac_priv;
};
/**
* packet recv information
*/
struct rtw_r_meta_data {
u8 dma_ch;
u8 hal_port;
u8 ta[6]; /* Transmitter Address */
u8 ppdu_cnt_chg;
#ifdef CONFIG_PHL_CSUM_OFFLOAD_RX
	u8 chksum_status; /* returned by mac_chk_rx_tcpip_chksum_ofd: 0 is ok, 1 is fail */
#endif
u16 pktlen; /* DW0 [0:13] */
u8 shift; /* DW0 [14:15] */
u8 wl_hd_iv_len; /* DW0 [16:21] */
u8 bb_sel; /* DW0 [22:22] */
u8 mac_info_vld; /* DW0 [23:23] */
u8 rpkt_type; /* DW0 [24:27] */
u8 drv_info_size; /* DW0 [28:30] */
u8 long_rxd; /* DW0 [31:31] */
u8 ppdu_type; /* DW1 [0:3] */
u8 ppdu_cnt; /* DW1 [4:6] */
u8 sr_en; /* DW1 [7:7] */
u8 user_id; /* DW1 [8:15] */
u16 rx_rate; /* DW1 [16:24] */
u8 rx_gi_ltf; /* DW1 [25:27] */
u8 non_srg_ppdu; /* DW1 [28:28] */
u8 inter_ppdu; /* DW1 [29:29] */
u8 bw; /* DW1 [30:31] */
u32 freerun_cnt; /* DW2 [0:31] */
u8 a1_match; /* DW3 [0:0] */
u8 sw_dec; /* DW3 [1:1] */
u8 hw_dec; /* DW3 [2:2] */
u8 ampdu; /* DW3 [3:3] */
u8 ampdu_end_pkt; /* DW3 [4:4] */
u8 amsdu; /* DW3 [5:5] */
u8 amsdu_cut; /* DW3 [6:6] */
u8 last_msdu; /* DW3 [7:7] */
u8 bypass; /* DW3 [8:8] */
u8 crc32; /* DW3 [9:9] */
u8 icverr; /* DW3 [10:10] */
u8 magic_wake; /* DW3 [11:11] */
u8 unicast_wake; /* DW3 [12:12] */
u8 pattern_wake; /* DW3 [13:13] */
u8 get_ch_info; /* DW3 [14:15] */
u8 pattern_idx; /* DW3 [16:20] */
u8 target_idc; /* DW3 [21:23] */
u8 chksum_ofld_en; /* DW3 [24:24] */
u8 with_llc; /* DW3 [25:25] */
u8 rx_statistics; /* DW3 [26:26] */
u8 frame_type; /* DW4 [0:1] */
u8 mc; /* DW4 [2:2] */
u8 bc; /* DW4 [3:3] */
u8 more_data; /* DW4 [4:4] */
u8 more_frag; /* DW4 [5:5] */
u8 pwr_bit; /* DW4 [6:6] */
u8 qos; /* DW4 [7:7] */
u8 tid; /* DW4 [8:11] */
u8 eosp; /* DW4 [12:12] */
u8 htc; /* DW4 [13:13] */
u8 q_null; /* DW4 [14:14] */
u16 seq; /* DW4 [16:27] */
u8 frag_num; /* DW4 [28:31] */
u8 sec_cam_idx; /* DW5 [0:7] */
u8 addr_cam; /* DW5 [8:15] */
u16 macid; /* DW5 [16:23] */
u8 rx_pl_id; /* DW5 [24:27] */
u8 addr_cam_vld; /* DW5 [28:28] */
u8 addr_fwd_en; /* DW5 [29:29] */
u8 rx_pl_match; /* DW5 [30:30] */
u8 mac_addr[6]; /* DW6 [0:31] DW7 [0:15] */
u8 smart_ant; /* DW7 [16:16] */
u8 sec_type; /* DW7 [17:20] */
};
/**
 * rtw_pkt_buf_list -- stores a packet from the upper layer (e.g. ndis, kernel, ethernet)
* @vir_addr: virtual address of this packet
* @phy_addr_l: lower 32-bit physical address of this packet
* @phy_addr_h: higher 32-bit physical address of this packet
* @length: length of this packet
* @type: tbd
*/
struct rtw_pkt_buf_list {
u8 *vir_addr;
u32 phy_addr_l;
u32 phy_addr_h;
u16 length;
};
enum rtw_tx_status {
TX_STATUS_TX_DONE,
TX_STATUS_TX_FAIL_REACH_RTY_LMT,
TX_STATUS_TX_FAIL_LIFETIME_DROP,
TX_STATUS_TX_FAIL_MACID_DROP,
TX_STATUS_TX_FAIL_SW_DROP,
TX_STATUS_TX_FAIL_MAX
};
#ifdef CONFIG_PHL_TX_DBG
typedef
void
(*CORE_TX_HANDLE_CALLBACK)
(
void *drv_priv,
void *pctx,
bool btx_ok
);
/**
 * @en_dbg: if en_dbg = true, phl tx will print tx dbg info for this dbg pkt; the flag is set by the core layer.
 * @tx_dbg_pkt_type: identification type, defined by the core layer
* @core_add_tx_t: core layer add tx req to phl time
* @enq_pending_wd_t: phl tx enqueue pending wd page time
* @recycle_wd_t: phl tx handle the wp report and recycle wd time
*/
struct rtw_tx_dbg {
bool en_dbg;
u16 tx_dbg_pkt_type;
u32 core_add_tx_t;
u32 enq_pending_wd_t;
u32 recycle_wd_t;
CORE_TX_HANDLE_CALLBACK statecb;
void *pctx;
};
#endif /* CONFIG_PHL_TX_DBG */
/**
* context for tx feedback handler
* @drvpriv: driver private
* @ctx: private context
* @id: module id of this tx packet
* @txsts: detail tx status
* @txfb_cb: tx feedback handler, currently assign by core layer
*/
struct rtw_txfb_t {
void *drvpriv;
void *ctx;
enum phl_module_id id;
enum rtw_tx_status txsts;
void (*txfb_cb)(struct rtw_txfb_t *txfb);
};
/**
 * the xmit request from the core layer, stored in the xmit phl ring
* @list: list
* @os_priv: the private context from core layer
* @mdata: see structure rtw_t_meta_data
 * @tx_time: xmit request tx time, in ms
* @shortcut_id: short cut id this packet will use in phl/hal
* @total_len: the total length of pkt_list
 * @pkt_cnt: the number of packets in pkt_list
 * @pkt_list: see structure rtw_pkt_buf_list
 * @txfb: tx feedback context
 *
 * Note: this structure is visible to the core, phl and hal layers
*/
struct rtw_xmit_req {
_os_list list;
void *os_priv;
enum rtw_treq_type treq_type;
struct rtw_t_meta_data mdata;
u32 tx_time;
u8 shortcut_id;
u32 total_len;
u8 pkt_cnt;
u8 *pkt_list;
struct rtw_txfb_t *txfb;
#ifdef CONFIG_PHL_TX_DBG
struct rtw_tx_dbg tx_dbg;
#endif /* CONFIG_PHL_TX_DBG */
};
/**
 * the recv packet to the core layer, stored in the recv phl ring
* @os_priv: the private context from core layer
* @mdata: see structure rtw_r_meta_data
* @shortcut_id: short cut id this packet will use in phl/hal
 * @pkt_cnt: the packet count of pkt_list
 * @rx_role: the role to which the RX packet is targeted
 * @tx_sta: the phl sta that sends this packet
 * @pkt_list: see structure rtw_pkt_buf_list
 *
 * Note: this structure is visible to the core, phl and hal layers
*/
struct rtw_recv_pkt {
void *os_priv;
struct rtw_r_meta_data mdata;
u8 shortcut_id;
u8 pkt_cnt;
u16 os_netbuf_len;
struct rtw_wifi_role_t *rx_role;
struct rtw_phl_stainfo_t *tx_sta;
struct rtw_pkt_buf_list pkt_list[MAX_RX_BUF_SEG_NUM];
struct rtw_phl_ppdu_phy_info phy_info;
};
/**
* the phl ring which stores XMIT requests can be access by both
* core and phl, and all the requests in this ring have the same TID value
* @tid: the TID value of this phl ring
* @dma_ch: dma channel of this phl ring, query by rtw_hal_tx_chnl_mapping()
* @tx_thres: tx threshold of this phl ring for batch handling tx requests
* @core_idx: record the index of latest entry accessed by core layer
* @phl_idx: record the index of handling done by phl layer
* @phl_next_idx: record the index of latest entry accessed by phl layer
* @entry: store the pointer of requests assigned to this phl ring
*/
struct rtw_phl_tx_ring {
u8 tid;
u8 dma_ch;
u16 tx_thres;
u16 core_idx;
_os_atomic phl_idx;
_os_atomic phl_next_idx;
u8 *entry[MAX_PHL_RING_ENTRY_NUM];/* change to dynamic allocation */
};
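/*
 * Sketch of the index arithmetic (an assumption for illustration, not part of
 * the driver): because MAX_PHL_RING_ENTRY_NUM (4096) is a power of two, the
 * number of requests queued by core but not yet handled by phl could be
 * derived as
 *	pending = (core_idx - phl_idx) & (MAX_PHL_RING_ENTRY_NUM - 1);
 */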
/**
* this structure stores sorted tx rings having frames to tx to the same sta
 * it changes every time _phl_check_tring_list() is executed
* @list: link to the next sta which has frames to transmit
* @sleep: true if this macid is under power-saving mode
* @has_mgnt: true if this macid has management frames to transmit
* @has_hiq: true if this macid has hiq frames to transmit
* @sorted_ring: pre-sorted phl ring status list of this macid
*/
struct phl_tx_plan {
_os_list list;
bool sleep;
bool has_mgnt;
bool has_hiq;
_os_list sorted_ring;
};
/**
* this phl ring list contains a list of phl TX rings that have the same macid
 * and different tid, and it can be accessed by both core and phl
* @list: link to next phl ring list with other macid
* @macid: the MACID value of this phl ring list
* @band: band of this phl ring list, band idx 0~1
* @wmm: wmm of this phl ring list, wmm idx 0~1
* @port: port of this phl ring list, port idx 0~4
* @mbssid: TODO
* @phl_ring: the phl rings with same macid but different tid, see rtw_phl_tx_ring
 * @tx_plan: transmission plan for this macid, decided by _phl_check_tring_list()
*/
struct rtw_phl_tring_list {
_os_list list;
u16 macid;
u8 band;/*0 or 1*/
u8 wmm;/*0 or 1*/
u8 port;
/*u8 mbssid*/
struct rtw_phl_tx_ring phl_ring[MAX_PHL_RING_CAT_NUM];/* tid 0~7, 8:mgnt, 9:hiq */
struct phl_tx_plan tx_plan;
};
/**
 * this phl RX ring can be accessed by both core and phl
* @core_idx: record the index of latest entry accessed by core layer
* @phl_idx: record the index of handling done by phl layer
* @entry: store the pointer of requests assigned to this phl ring
*/
struct rtw_phl_rx_ring {
_os_atomic core_idx;
_os_atomic phl_idx;
struct rtw_recv_pkt *entry[MAX_PHL_RING_ENTRY_NUM];/* change to dynamic allocation */
};
/**
* the physical address list
*/
struct rtw_phy_addr_list {
_os_list list;
u32 phy_addr_l;
u32 phy_addr_h;
};
/**
* the phl pkt tx request from phl layer to hal layer
* @wd_page: the buffer of wd page allocated by phl and filled by hal
 * @wd_len: the cached wd_page length for phl tx shortcut; 0 means no phl txsc
* @wp_seq: pcie only, wp sequence of this phl packet request
* @tx_req: see struct rtw_xmit_req
*
 * Note: this structure should be visible to the phl and hal layers (hana_todo)
*/
struct rtw_phl_pkt_req {
u8 *wd_page;
u8 wd_len;
u16 wp_seq;
struct rtw_xmit_req *tx_req;
};
/*
0000: WIFI packet
0001: PPDU status
0010: channel info
0011: BB scope mode
0100: F2P TX CMD report
0101: SS2FW report
0110: TX report
0111: TX payload release to host
1000: DFS report
1001: TX payload release to WLCPU
1010: C2H packet */
enum rtw_rx_type {
RTW_RX_TYPE_WIFI = 0,
RTW_RX_TYPE_PPDU_STATUS = 1,
RTW_RX_TYPE_CHANNEL_INFO = 2,
RTW_RX_TYPE_TX_RPT = 3,
RTW_RX_TYPE_TX_WP_RELEASE_HOST = 4,
RTW_RX_TYPE_DFS_RPT = 5,
RTW_RX_TYPE_C2H = 6,
RTW_RX_TYPE_MAX = 0xFF
};
struct rtw_phl_rx_pkt {
_os_list list;
enum rtw_rx_type type;
u8 *rxbuf_ptr;
struct rtw_recv_pkt r;
};
struct rtw_xmit_recycle {
u16 wp_seq;
struct rtw_xmit_req *tx_req;
};
enum rtw_traffic_dir {
TRAFFIC_UL = 0, /* Uplink */
TRAFFIC_DL, /* Downlink */
TRAFFIC_BALANCE,
TRAFFIC_MAX
};
enum rtw_rx_fltr_mode {
RX_FLTR_MODE_SNIFFER, /* 0 */
RX_FLTR_MODE_SCAN,
RX_FLTR_MODE_STA_LINKING,
RX_FLTR_MODE_STA_NORMAL,
RX_FLTR_MODE_AP_NORMAL,
RX_FLTR_MODE_RESTORE = 0xFF
};
#endif /* __PHL_TRX_DEF_H_ */
| 2301_81045437/rtl8852be | phl/phl_trx_def.h | C | agpl-3.0 | 14,754 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_TRX_MIT_C_
#include "phl_headers.h"
#if defined(CONFIG_PCI_HCI) && defined(PCIE_TRX_MIT_EN)
enum rtw_phl_status phl_pcie_trx_mit_start(struct phl_info_t *phl_info,
u8 dispr_idx)
{
struct rtw_pcie_trx_mit_info_t info = {0};
if (dispr_idx != HW_BAND_0)
return RTW_PHL_STATUS_SUCCESS;
PHL_INFO("%s :: pcie trx interrupt mitigation off\n", __func__);
if (RTW_HAL_STATUS_SUCCESS !=
rtw_hal_pcie_trx_mit(phl_info->hal, info.tx_timer, info.tx_counter,
info.rx_timer, info.rx_counter))
return RTW_PHL_STATUS_FAILURE;
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status
phl_evt_pcie_trx_mit_hdlr(struct phl_info_t *phl_info, u8 *mit_info)
{
struct rtw_pcie_trx_mit_info_t *info = (struct rtw_pcie_trx_mit_info_t *)mit_info;
PHL_INFO("%s :: tx_timer == %d us, tx_counter = %d, rx_timer == %d us, "
"rx_counter = %d, fixed_mitigation=%d\n",
__func__, info->tx_timer, info->tx_counter, info->rx_timer,
info->rx_counter, info->fixed_mitigation);
if (RTW_HAL_STATUS_SUCCESS !=
rtw_hal_pcie_trx_mit(phl_info->hal, info->tx_timer,
info->tx_counter, info->rx_timer,
info->rx_counter))
return RTW_PHL_STATUS_FAILURE;
phl_info->hci->fixed_mitigation = info->fixed_mitigation;
return RTW_PHL_STATUS_SUCCESS;
}
static void _phl_pcie_trx_mit_done(void *drv_priv, u8 *cmd, u32 cmd_len, enum rtw_phl_status status)
{
if (cmd) {
_os_mem_free(drv_priv, cmd, cmd_len);
cmd = NULL;
}
}
static enum rtw_phl_status
phl_pcie_trx_mit(struct phl_info_t *phl_info,
u32 tx_timer, u8 tx_counter, u32 rx_timer, u8 rx_counter)
{
#ifdef CONFIG_CMD_DISP
void *drv_priv = phl_to_drvpriv(phl_info);
struct rtw_pcie_trx_mit_info_t *info = NULL;
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
u32 info_len = sizeof(struct rtw_pcie_trx_mit_info_t);
info = _os_mem_alloc(drv_priv, info_len);
if (info == NULL) {
PHL_ERR("%s: alloc mit_info failed!\n", __func__);
goto _exit;
}
info->tx_timer = tx_timer;
info->tx_counter = tx_counter;
info->rx_timer = rx_timer;
info->rx_counter = rx_counter;
psts = phl_cmd_enqueue(phl_info,
HW_BAND_0,
MSG_EVT_PCIE_TRX_MIT,
(u8 *)info,
info_len,
_phl_pcie_trx_mit_done,
PHL_CMD_NO_WAIT,
0);
if (is_cmd_failure(psts)) {
/* Send cmd success, but wait cmd fail*/
psts = RTW_PHL_STATUS_FAILURE;
} else if (psts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
psts = RTW_PHL_STATUS_FAILURE;
_os_mem_free(drv_priv, info, info_len);
}
_exit:
return psts;
#else
PHL_ERR("phl_fsm not support %s\n", __func__);
return RTW_PHL_STATUS_FAILURE;
#endif /*CONFIG_CMD_DISP*/
}
void phl_pcie_trx_mit_watchdog(struct phl_info_t *phl_info)
{
static enum rtw_tfc_lvl rx_traffic_lvl = RTW_TFC_IDLE;
struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
if (phl_info->hci->fixed_mitigation == 1)
return;
if (rx_traffic_lvl == phl_stats->rx_traffic.lvl)
return;
rx_traffic_lvl = phl_stats->rx_traffic.lvl;
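	/* Under high RX load, batch interrupts with timer/counter mitigation;
	 * otherwise fall back to unmitigated (immediate) interrupts. */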
if (rx_traffic_lvl == RTW_TFC_HIGH)
phl_pcie_trx_mit(phl_info, 0, 0, 100000, 200);
else
phl_pcie_trx_mit(phl_info, 0, 0, 0, 0);
}
#endif /*defined(CONFIG_PCI_HCI) && defined(PCIE_TRX_MIT_EN)*/
| 2301_81045437/rtl8852be | phl/phl_trx_mit.c | C | agpl-3.0 | 3,945 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_TRX_MIT_H_
#define _PHL_TRX_MIT_H_
#if defined(CONFIG_PCI_HCI) && defined(PCIE_TRX_MIT_EN)
enum rtw_phl_status phl_pcie_trx_mit_start(struct phl_info_t *phl_info,
u8 dispr_idx);
enum rtw_phl_status phl_evt_pcie_trx_mit_hdlr(struct phl_info_t *phl_info,
u8 *mit_info);
void phl_pcie_trx_mit_watchdog(struct phl_info_t *phl_info);
#endif
#endif /*_PHL_TRX_MIT_H_*/
| 2301_81045437/rtl8852be | phl/phl_trx_mit.h | C | agpl-3.0 | 1,052 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_TWT_C_
#include "phl_headers.h"
#ifdef CONFIG_PHL_TWT
#include "phl_twt.h"
void _twt_transfer_config_state(enum phl_twt_action action,
enum twt_config_state *state)
{
if (PHL_TWT_ACTION_FREE == action)
*state = twt_config_state_free;
else if (PHL_TWT_ACTION_ALLOC == action)
*state = twt_config_state_idle;
else if (PHL_TWT_ACTION_ENABLE == action)
*state = twt_config_state_enable;
else if (PHL_TWT_ACTION_DISABLE == action)
*state = twt_config_state_idle;
else if (PHL_TWT_ACTION_UP_ERROR == action)
*state = twt_config_state_error;
}
/*
 * Calculate the bitmap position of a macid
 * @map_offset: 32-bit word offset into wait_macid_map
 * @macid_map: bit mask for the macid within that word
 * Ex: macid_map = 0x80 (bit 7), offset = 2 -> macid 71 (7 + 2*32) waits for announce
*/
void _twt_calc_macid_map_info(u16 macid, u8 *map_offset, u32 *macid_map)
{
*map_offset = (u8)(macid / 32);
*macid_map = BIT(macid % 32);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_calc_macid_map_info(): macid:%d, map_offset:%d, macid_map:0x%x\n",
macid, *map_offset, *macid_map);
}
u32 _twt_calc_intvl(u8 exp, u16 mantissa)
{
u32 intvl = 0;
intvl = mantissa * (1 << exp);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_calc_intvl(): exp:%u, mantissa:%u, intvl=%u\n",
exp, mantissa, intvl);
return intvl;
}
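/*
 * Worked example: exp = 10, mantissa = 512 gives 512 * 2^10 = 524288
 * (the unit follows whatever the mantissa is expressed in).
 */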
u32 _twt_calc_wakeup_dur(u8 dur, enum rtw_phl_wake_dur_unit dur_unit)
{
u32 dur_t = 0;
if (RTW_PHL_WAKE_256US == dur_unit)
dur_t = dur * 256;
else if (RTW_PHL_WAKE_1TU == dur_unit)
dur_t = dur * 1024;
return dur_t;
}
enum rtw_phl_status _twt_fill_individual_twt_para_set(
struct rtw_phl_indiv_twt_para_set *para,
bool ndp_paging, u8 *buf, u8 *length)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_req_type_indiv *req_type = ¶->req_type;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_fill_individual_twt_para_set(): twt_request(%d), twt_setup_cmd(%d), trigger(%d), implicit(%d), flow_type(%d), twt_flow_id(%d), twt_wake_int_exp(%d), twt_protection(%d)\n",
req_type->twt_request, req_type->twt_setup_cmd,
req_type->trigger, req_type->implicit,
req_type->flow_type, req_type->twt_flow_id,
req_type->twt_wake_int_exp, req_type->twt_protection);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_fill_individual_twt_para_set(): target_wake_t_h(0x%08x), target_wake_t_l(0x%08x), nom_min_twt_wake_dur(%d), twt_wake_int_mantissa(%d), twt_channel(%d)\n",
para->target_wake_t_h, para->target_wake_t_l,
para->nom_min_twt_wake_dur, para->twt_wake_int_mantissa,
para->twt_channel);
*length = 0;
/*Request Type*/
SET_TWT_REQ_TYPE_TWT_REQUEST(buf, req_type->twt_request);
SET_TWT_REQ_TYPE_TWT_SETUP_COMMAND(buf, req_type->twt_setup_cmd);
SET_TWT_REQ_TYPE_TRIGGER(buf, req_type->trigger);
SET_TWT_REQ_TYPE_IMPLICIT(buf, req_type->implicit);
SET_TWT_REQ_TYPE_FLOW_TYPE(buf, req_type->flow_type);
SET_TWT_REQ_TYPE_TWT_FLOW_IDENTIFER(buf, req_type->twt_flow_id);
SET_TWT_REQ_TYPE_TWT_WAKE_INTERVAL_EXPONENT(buf,
req_type->twt_wake_int_exp);
SET_TWT_REQ_TYPE_TWT_PROTECTION(buf, req_type->twt_protection);
*length += REQUEST_TYPE_LENGTH;
if (RTW_PHL_TWT_GROUPING == req_type->twt_setup_cmd) {
/*TODO*/
} else {
SET_TWT_TARGET_WAKE_TIME_L(buf, para->target_wake_t_l);
SET_TWT_TARGET_WAKE_TIME_H(buf, para->target_wake_t_h);
*length += TARGET_WAKE_TIME_LENGTH;
}
SET_TWT_NOMINAL_MINIMUM_TWT_WAKE_DURATION(buf, *length,
para->nom_min_twt_wake_dur);
*length += NOMINAL_MINIMUM_TWT_WAKE_DURATION_LENGTH;
SET_TWT_TWT_WAKE_INTERVAL_MANTISSA(buf, *length,
para->twt_wake_int_mantissa);
*length += TWT_WAKE_INTERVAL_MANTISSA_LENGTH;
SET_TWT_TWT_CHANNEL(buf, *length, para->twt_channel);
*length += TWT_CHANNEL_LENGTH;
if (true == ndp_paging) {
/*TODO*/
}
pstatus = RTW_PHL_STATUS_SUCCESS;
return pstatus;
}
enum rtw_phl_status _twt_parse_individual_twt_para(u8 *twt_ele, u16 length,
struct rtw_phl_twt_element *element)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_indiv_twt_para_set *para = &element->info.i_twt_para_set;
struct rtw_phl_req_type_indiv *req_type = ¶->req_type;
u8 *next_buf = twt_ele + ELEM_ID_LEN + ELEM_LEN_LEN + CONTROL_LENGTH;
req_type->twt_request = GET_TWT_REQ_TYPE_TWT_REQUEST(next_buf);
req_type->twt_setup_cmd = GET_TWT_REQ_TYPE_TWT_SETUP_COMMAND(next_buf);
req_type->trigger = GET_TWT_REQ_TYPE_TRIGGER(next_buf);
req_type->implicit = GET_TWT_REQ_TYPE_IMPLICIT(next_buf);
req_type->flow_type = GET_TWT_REQ_TYPE_FLOW_TYPE(next_buf);
req_type->twt_flow_id = GET_TWT_REQ_TYPE_TWT_FLOW_IDENTIFER(next_buf);
req_type->twt_wake_int_exp =
GET_TWT_REQ_TYPE_TWT_WAKE_INTERVAL_EXPONENT(next_buf);
req_type->twt_protection = GET_TWT_REQ_TYPE_TWT_PROTECTION(next_buf);
next_buf += REQUEST_TYPE_LENGTH;
if (RTW_PHL_TWT_GROUPING == req_type->twt_setup_cmd) {
//Todo
} else {
para->target_wake_t_l = GET_TWT_TARGET_WAKE_TIME_L(next_buf);
para->target_wake_t_h = GET_TWT_TARGET_WAKE_TIME_H(next_buf);
next_buf += TARGET_WAKE_TIME_LENGTH;
}
para->nom_min_twt_wake_dur =
GET_TWT_NOMINAL_MINIMUM_TWT_WAKE_DURATION(next_buf);
next_buf += NOMINAL_MIN_TWT_WAKE_DURATION_LENGTH;
para->twt_wake_int_mantissa =
GET_TWT_TWT_WAKE_INTERVAL_MANTISSA(next_buf);
next_buf += TWT_WAKE_INTERVAL_MANTISSA_LENGTH;
para->twt_channel = GET_TWT_TWT_CHANNEL(next_buf);
next_buf += TWT_CHANNEL_LENGTH;
if (element->twt_ctrl.ndp_paging_indic) {
/*TODO*/
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_parse_individual_twt_para(): twt_request:%d, twt_setup_cmd:%d, trigger:%d, implicit:%d, flow_type:%d, twt_flow_id:%d, twt_wake_int_exp:%d, twt_protection:%d\n",
req_type->twt_request, req_type->twt_setup_cmd,
req_type->trigger, req_type->implicit,
req_type->flow_type, req_type->twt_flow_id,
req_type->twt_wake_int_exp, req_type->twt_protection);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_parse_individual_twt_para(): target_wake_t_h:0x%08X, target_wake_t_l:0x%08X, nom_min_twt_wake_dur:%d, twt_wake_int_mantissa:%d, twt_channel:%d\n",
para->target_wake_t_h, para->target_wake_t_l,
para->nom_min_twt_wake_dur, para->twt_wake_int_mantissa,
para->twt_channel);
pstatus = RTW_PHL_STATUS_SUCCESS;
return pstatus;
}
enum rtw_phl_status _twt_announce_info_enqueue(struct phl_info_t *phl_info,
struct phl_queue *twt_annc_q,
struct _twt_announce_info *twt_annc)
{
void *drv = phl_to_drvpriv(phl_info);
_os_spinlockfg sp_flags;
if (!twt_annc)
return RTW_PHL_STATUS_FAILURE;
_os_spinlock(drv, &twt_annc_q->lock, _irq, &sp_flags);
list_add_tail(&twt_annc->list, &twt_annc_q->queue);
twt_annc_q->cnt++;
_os_spinunlock(drv, &twt_annc_q->lock, _irq, &sp_flags);
return RTW_PHL_STATUS_SUCCESS;
}
struct _twt_announce_info * _twt_announce_info_dequeue(
struct phl_info_t *phl_info,
struct phl_queue *twt_annc_q)
{
struct _twt_announce_info *twt_annc = NULL;
void *drv = phl_to_drvpriv(phl_info);
_os_spinlockfg sp_flags;
_os_spinlock(drv, &twt_annc_q->lock, _irq, &sp_flags);
if (list_empty(&twt_annc_q->queue)) {
twt_annc = NULL;
} else {
twt_annc = list_first_entry(&twt_annc_q->queue,
struct _twt_announce_info, list);
list_del(&twt_annc->list);
twt_annc_q->cnt--;
}
_os_spinunlock(drv, &twt_annc_q->lock, _irq, &sp_flags);
return twt_annc;
}
enum rtw_phl_status _twt_sta_announce(struct phl_info_t *phl_info,
struct phl_queue *annc_queue, u16 macid)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
void *drv = phl_to_drvpriv(phl_info);
_os_spinlockfg sp_flags;
struct _twt_announce_info *info = NULL;
_os_list *annc_list = &annc_queue->queue;
u8 offset = 0;
u32 macid_map = 0;
_twt_calc_macid_map_info(macid, &offset, &macid_map);
_os_spinlock(drv, &annc_queue->lock, _irq, &sp_flags);
phl_list_for_loop(info, struct _twt_announce_info, annc_list, list) {
if (NULL == info)
break;
if (info->map_offset != offset)
continue;
if (!(info->wait_macid_map & macid_map))
continue;
hstatus = rtw_hal_twt_sta_announce(phl_info->hal, (u8)macid);
if (RTW_HAL_STATUS_SUCCESS == hstatus) {
info->wait_macid_map &= (~macid_map);
pstatus = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_sta_announce(): rtw_hal_twt_sta_announce success, macid:%d, map_offset:%d, wait_macid_map:0x%x\n",
macid, info->map_offset, info->wait_macid_map);
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "_twt_sta_announce(): rtw_hal_twt_sta_announce fail, macid:%d, map_offset:%d, wait_macid_map:0x%x\n",
macid, info->map_offset, info->wait_macid_map);
}
break;
}
_os_spinunlock(drv, &annc_queue->lock, _irq, &sp_flags);
return pstatus;
}
enum rtw_phl_status _twt_set_sta_announce_state(struct phl_info_t *phl_info,
struct phl_queue *annc_q, u16 macid,
enum phl_wait_annc_type type)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
void *drv = phl_to_drvpriv(phl_info);
_os_spinlockfg sp_flags;
struct _twt_announce_info *info = NULL;
_os_list *annc_list = &annc_q->queue;
u8 offset = 0;
u32 macid_map = 0;
u8 bset = false;
_twt_calc_macid_map_info(macid, &offset, &macid_map);
_os_spinlock(drv, &annc_q->lock, _irq, &sp_flags);
phl_list_for_loop(info, struct _twt_announce_info, annc_list, list) {
if (NULL == info)
break;
if (info->map_offset != offset)
continue;
if (PHL_WAIT_ANNC_ENABLE == type) {
info->wait_macid_map |= macid_map;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_set_sta_announce_state(): set macid:%d to wait annc state, map_offset:%d, wait_macid_map:0x%x\n",
macid, info->map_offset, info->wait_macid_map);
} else if (PHL_WAIT_ANNC_DISABLE == type) {
info->wait_macid_map &= (~macid_map);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_set_sta_announce_state(): set macid:%d to annc state, map_offset:%d, wait_macid_map:0x%x\n",
macid, info->map_offset, info->wait_macid_map);
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "_twt_set_sta_announce_state(): Unknown type:%d\n",
type);
break;
}
pstatus = RTW_PHL_STATUS_SUCCESS;
bset = true;
break;
}
_os_spinunlock(drv, &annc_q->lock, _irq, &sp_flags);
if (true == bset)
goto exit;
if (PHL_WAIT_ANNC_ENABLE == type) {
info = _os_mem_alloc(drv, sizeof(struct _twt_announce_info));
if (NULL == info) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "_twt_sta_wait_announce(): Fail to alloc new annc info\n");
} else {
info->map_offset = offset;
info->wait_macid_map = macid_map;
_twt_announce_info_enqueue(phl_info, annc_q, info);
pstatus = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_set_sta_announce_state(): add new Q and set macid:%d to annc state, map_offset:%d, wait_macid_map:0x%x\n",
macid, info->map_offset, info->wait_macid_map);
}
} else if (PHL_WAIT_ANNC_DISABLE == type) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "_twt_set_sta_announce_state(): macid:%d is not in wait annc state\n",
macid);
} else {
/*nothing*/
}
exit:
return pstatus;
}
#if 0
/*
 * Get the macid of a sta waiting for announce from FW
* @wait_case: C2HTWT_ANNOUNCE_WAIT_DISABLE_MACID = 0, C2HTWT_ANNOUNCE_WAIT_ENABLE_MACID = 1
* @macid0: macid of sta
* @macid1: macid of sta
* @macid2: macid of sta
*/
enum rtw_phl_status _twt_handle_c2h_wait_annc(struct phl_info_t *phl_info,
u8 *content)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_twt_info *phl_twt_info = get_twt_info(phl_info);
struct phl_queue *annc_q = &phl_twt_info->twt_annc_queue;
u8 wait_case = 0, macid = 0;
u8 i = 0;
bool error = false;
wait_case = (*((u8*)content)) & 0xf; /*BIT0-3*/
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_handle_c2h_wait_annc(): content:0x%X, wait_case:%d, macid0:%d, macid1:%d, macid2:%d\n",
*content, wait_case, *((u8*)content + 1), *((u8*)content + 2),
*((u8*)content + 3));
for (i = 1; i < 4; i++) {
macid = *((u8*)content + i);
if (IGNORE_MACID == macid)
continue;
pstatus = _twt_set_sta_announce_state(phl_info, annc_q, macid,
wait_case);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_handle_c2h_wait_annc(): pstatus:%d, macid: %d, fail to set sta wait announce state\n",
pstatus, macid);
error = true;
}
}
if (true == error)
pstatus = RTW_PHL_STATUS_FAILURE;
return pstatus;
}
#endif
/*
struct rtw_twt_sta_info *
_twt_get_twt_sta(
struct phl_info_t *phl_info,
struct phl_queue *sta_queue,
struct rtw_phl_stainfo_t *phl_sta,
u8 id
)
{
void *drv = phl_to_drvpriv(phl_info);
struct rtw_twt_sta_info *twt_sta = NULL, *ret_twt_sta = NULL;
_os_list *sta_list = &sta_queue->queue;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> _twt_get_twt_sta()\n");
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
phl_list_for_loop(twt_sta, struct rtw_twt_sta_info, sta_list, list) {
if (twt_sta == NULL)
break;
if (phl_sta != twt_sta->phl_sta)
continue;
if (DELETE_ALL != id && id != twt_sta->id)
continue;
ret_twt_sta = twt_sta;
}
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== _twt_get_twt_sta()\n");
return ret_twt_sta;
}
*/
void _twt_fill_config_info_indiv(struct rtw_phl_twt_info *twt_info,
struct rtw_phl_indiv_twt_para_set *para_set)
{
struct rtw_phl_req_type_indiv *req_type = ¶_set->req_type;
twt_info->trigger = req_type->trigger;
twt_info->flow_type = req_type->flow_type;
twt_info->implicit_lastbcast = req_type->implicit;
twt_info->twt_protection = req_type->twt_protection;
twt_info->twt_wake_int_exp = req_type->twt_wake_int_exp;
twt_info->twt_wake_int_mantissa = para_set->twt_wake_int_mantissa;
twt_info->nom_min_twt_wake_dur = para_set->nom_min_twt_wake_dur;
twt_info->target_wake_time_h = para_set->target_wake_t_h;
twt_info->target_wake_time_l = para_set->target_wake_t_l;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_fill_config_info_indiv(): twt_info: trigger:%d, flow_type:%d, implicit_lastbcast:%d, twt_protection:%d, twt_wake_int_exp:%d, twt_wake_int_mantissa:%d, nom_min_twt_wake_dur:%d, target_wake_time_h:0x%08X, target_wake_time_l:0x%08X\n",
twt_info->trigger, twt_info->flow_type,
twt_info->implicit_lastbcast, twt_info->twt_protection,
twt_info->twt_wake_int_exp, twt_info->twt_wake_int_mantissa,
twt_info->nom_min_twt_wake_dur, twt_info->target_wake_time_h,
twt_info->target_wake_time_l);
}
void _twt_fill_config_info(struct rtw_phl_twt_info *twt_info,
struct rtw_phl_twt_setup_info *setup_info)
{
struct rtw_phl_twt_element *twt_ele = &setup_info->twt_element;
struct rtw_phl_twt_control *twt_ctrl = &twt_ele->twt_ctrl;
twt_info->responder_pm_mode = twt_ctrl->responder_pm_mode;
twt_info->nego_type = twt_ctrl->nego_type;
twt_info->wake_dur_unit = twt_ctrl->wake_dur_unit;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_fill_config_info(): twt_info: responder_pm_mode:%d, nego_type:%d, wake_dur_unit:%d\n",
twt_info->responder_pm_mode, twt_info->nego_type,
twt_info->wake_dur_unit);
if (RTW_PHL_INDIV_TWT == twt_info->nego_type) {
_twt_fill_config_info_indiv(twt_info,
&twt_ele->info.i_twt_para_set);
} else {
/*todo*/
}
}
void _twt_reset_config_info(struct phl_info_t *phl,
struct phl_twt_config *config)
{
config->role = NULL;
_os_mem_set(phl_to_drvpriv(phl), &config->twt_info, 0,
sizeof(struct rtw_phl_twt_info));
}
u8 _twt_compare_twt_para(struct rtw_phl_twt_info *twt_info,
struct rtw_phl_twt_setup_info *twt_setup)
{
u8 ret = false;
u64 twt1 = 0, twt2 = 0, diff_t = 0;
u32 intvl = 0;
struct rtw_phl_twt_info cmp_info = {0};
_twt_fill_config_info(&cmp_info, twt_setup);
do {
if (cmp_info.responder_pm_mode != twt_info->responder_pm_mode)
break;
if (cmp_info.nego_type != twt_info->nego_type)
break;
if (cmp_info.trigger != twt_info->trigger)
break;
if (cmp_info.flow_type != twt_info->flow_type)
break;
if (cmp_info.implicit_lastbcast != twt_info->implicit_lastbcast)
break;
if (cmp_info.twt_protection != twt_info->twt_protection)
break;
if ((_twt_calc_wakeup_dur(cmp_info.nom_min_twt_wake_dur
, cmp_info.wake_dur_unit)) !=
(_twt_calc_wakeup_dur(twt_info->nom_min_twt_wake_dur,
twt_info->wake_dur_unit)))
break;
if ((_twt_calc_intvl(cmp_info.twt_wake_int_exp,
cmp_info.twt_wake_int_mantissa)) !=
(_twt_calc_intvl(twt_info->twt_wake_int_exp,
twt_info->twt_wake_int_mantissa)))
break;
/*compare target wake time*/
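		/* Two schedules with equal interval/duration are treated as the
		 * same periodic window when their target wake times differ by an
		 * integer multiple of the interval. */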
intvl = _twt_calc_intvl(twt_info->twt_wake_int_exp,
twt_info->twt_wake_int_mantissa);
twt1 = cmp_info.target_wake_time_h;
twt1 = twt1 << 32;
twt1 |= cmp_info.target_wake_time_l;
twt2 = twt_info->target_wake_time_h;
twt2 = twt2 << 32;
twt2 |= twt_info->target_wake_time_l;
if (twt1 > twt2) {
/*cmp_info target_wake_time > twt_info target_wake_time*/
diff_t = _os_minus64(twt1, twt2);
} else {
diff_t = _os_minus64(twt2, twt1);
}
if (_os_modular64(diff_t, intvl) != 0)
break;
ret = true;
} while(false);
return ret;
}
u8 _twt_is_same_config(struct phl_twt_config *config,
struct _twt_compare *compare_info)
{
bool found = false;
do {
if (config->role != compare_info->role)
break;
if (!(twt_config_state_idle == config->state ||
twt_config_state_enable == config->state))
break;
if (_twt_compare_twt_para(&config->twt_info,
&compare_info->twt_setup))
found = true;
} while(false);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== _twt_is_same_config(): found(%d)\n",
found);
return found;
}
void _twt_dump_twt_cfg_info(struct phl_twt_cfg_info *twt_cfg_i)
{
struct phl_twt_config *config = NULL;
u8 i = 0;
config = (struct phl_twt_config *)twt_cfg_i->twt_cfg_ring;
for (i = 0; i < twt_cfg_i->twt_cfg_num; i++) {
PHL_TRACE(COMP_PHL_TWT, _PHL_DEBUG_, "_twt_dump_twt_cfg_info(): loop i(%d), cfg id(%d), state(%d)\n",
i, config[i].idx, config[i].state);
}
}
enum rtw_phl_status _twt_operate_twt_config(struct phl_info_t *phl_info,
struct phl_twt_cfg_info *twt_cfg_i, enum phl_operate_config_type type,
u8 *para, struct phl_twt_config **ret_config)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_twt_config *config =
(struct phl_twt_config *)twt_cfg_i->twt_cfg_ring;
u8 i = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_DEBUG_, "==> _twt_operate_twt_config(): type(%d)\n",
type);
if (type == PHL_GET_CONFIG_BY_ID) {
if (*para >= twt_cfg_i->twt_cfg_num) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_operate_twt_config(): get cfg by id(%d) fail, out of range(%d)\n",
*para, twt_cfg_i->twt_cfg_num);
goto exit;
}
if (twt_config_state_free == config[*para].state){
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_operate_twt_config(): get cfg by id(%d) fail, cfg state is in twt_config_state_free\n",
*para);
goto exit;
}
*ret_config = &config[*para];
pstatus = RTW_PHL_STATUS_SUCCESS;
goto exit;
} else if (type == PHL_FREE_CONFIG) {
if (*para >= twt_cfg_i->twt_cfg_num) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_operate_twt_config(): free cfg by id(%d) fail, out of range(%d)\n",
*para, twt_cfg_i->twt_cfg_num);
goto exit;
}
if (twt_config_state_free == config[*para].state){
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_operate_twt_config(): free cfg by id(%d) fail, cfg state is in twt_config_state_free\n",
*para);
goto exit;
}
_twt_transfer_config_state(PHL_TWT_ACTION_FREE, &config[*para].state);
pstatus = RTW_PHL_STATUS_SUCCESS;
goto exit;
} else if (type == PHL_GET_HEAD_CONFIG) {
*ret_config = config;
pstatus = RTW_PHL_STATUS_SUCCESS;
goto exit;
} else if (type == PHL_GET_NEXT_CONFIG) {
u8 next_id = 0;
if (*para >= twt_cfg_i->twt_cfg_num) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_operate_twt_config(): get cfg by id(%d) fail, out of range(%d)\n",
*para, twt_cfg_i->twt_cfg_num);
goto exit;
}
next_id = *para + 1;
if (next_id == twt_cfg_i->twt_cfg_num)
next_id = 0;
*ret_config = &config[next_id];
pstatus = RTW_PHL_STATUS_SUCCESS;
goto exit;
}
for (i = 0; i < twt_cfg_i->twt_cfg_num; i++) {
PHL_TRACE(COMP_PHL_TWT, _PHL_DEBUG_, "_twt_operate_twt_config(): loop i(%d), cfg id(%d), state(%d)\n",
i, config[i].idx, config[i].state);
if (type == PHL_GET_NEW_CONFIG) {
if (twt_config_state_free != config[i].state)
continue;
_twt_reset_config_info(phl_info, &config[i]);
_twt_transfer_config_state(PHL_TWT_ACTION_ALLOC,
&config[i].state);
config[i].twt_info.twt_id = config[i].idx;
*ret_config = &config[i];
pstatus = RTW_PHL_STATUS_SUCCESS;
break;
} else if (type == PHL_GET_CONFIG_BY_ROLE) {
if (twt_config_state_free == config[i].state)
continue;
if ((struct rtw_wifi_role_t *)para != config[i].role)
continue;
*ret_config = &config[i];
pstatus = RTW_PHL_STATUS_SUCCESS;
break;
} else if (type == PHL_GET_CONFIG_BY_PARA) {
if (twt_config_state_free == config[i].state)
continue;
if (!_twt_is_same_config(&config[i],
(struct _twt_compare *)para))
continue;
*ret_config = &config[i];
pstatus = RTW_PHL_STATUS_SUCCESS;
break;
}
}
exit:
_twt_dump_twt_cfg_info(twt_cfg_i);
PHL_TRACE(COMP_PHL_TWT, _PHL_DEBUG_, "<== _twt_operate_twt_config(): pstatus:%d\n",
pstatus);
return pstatus;
}
enum rtw_phl_status _twt_sta_update(void *hal, u16 macid, u8 twt_id,
enum rtw_phl_twt_sta_action action)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
hstatus = rtw_hal_twt_sta_update(hal, (u8)macid, twt_id, action);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "twt sta update fail: hstatus:%d, macid:%d, twt_id:%d, action:%d\n",
hstatus, macid, twt_id, action);
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "twt sta update ok: macid:%d, twt_id:%d, action:%d\n",
macid, twt_id, action);
pstatus = RTW_PHL_STATUS_SUCCESS;
}
return pstatus;
}
enum rtw_phl_status _twt_all_sta_update(struct phl_info_t *phl_info,
u8 config_id, struct phl_queue *sta_queue,
enum rtw_phl_twt_sta_action action)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
void *drv = phl_to_drvpriv(phl_info);
_os_list *sta_list = &sta_queue->queue;
struct rtw_twt_sta_info *psta = NULL;
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
phl_list_for_loop(psta, struct rtw_twt_sta_info, sta_list, list) {
if (NULL == psta)
break;
pstatus = _twt_sta_update(phl_info->hal, psta->phl_sta->macid,
config_id, action);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
break;
}
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
return pstatus;
}
struct rtw_twt_sta_info * _twt_get_sta_info(struct phl_info_t *phl_info,
struct phl_queue *sta_queue, struct rtw_phl_stainfo_t *phl_sta)
{
void *drv = phl_to_drvpriv(phl_info);
_os_list *sta_list = &sta_queue->queue;
struct rtw_twt_sta_info *psta = NULL, *ret_sta = NULL;
_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
phl_list_for_loop(psta, struct rtw_twt_sta_info, sta_list, list) {
if (NULL == psta)
break;
if (phl_sta == psta->phl_sta) {
ret_sta = psta;
break;
}
}
_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
return ret_sta;
}
enum rtw_phl_status _twt_sta_enqueue(struct phl_info_t *phl_info,
struct phl_queue *sta_q, struct rtw_twt_sta_info *psta)
{
void *drv = phl_to_drvpriv(phl_info);
if (!psta)
return RTW_PHL_STATUS_FAILURE;
_os_spinlock(drv, &sta_q->lock, _bh, NULL);
list_add_tail(&psta->list, &sta_q->queue);
sta_q->cnt++;
_os_spinunlock(drv, &sta_q->lock, _bh, NULL);
return RTW_PHL_STATUS_SUCCESS;
}
struct rtw_twt_sta_info * _twt_sta_dequeue(struct phl_info_t *phl_info,
struct phl_queue *sta_q, u16 *cnt)
{
struct rtw_twt_sta_info *psta = NULL;
void *drv = phl_to_drvpriv(phl_info);
_os_spinlock(drv, &sta_q->lock, _bh, NULL);
if (list_empty(&sta_q->queue)) {
psta = NULL;
} else {
psta = list_first_entry(&sta_q->queue,
struct rtw_twt_sta_info, list);
list_del(&psta->list);
sta_q->cnt--;
*cnt = (u16)sta_q->cnt;
}
_os_spinunlock(drv, &sta_q->lock, _bh, NULL);
return psta;
}
/*
 * Delete all sta entries from the queue
* @sta_queue: twt sta Q
*/
void _twt_delete_all_sta(struct phl_info_t *phl_info,
struct phl_queue *sta_queue)
{
struct rtw_twt_sta_info *twt_sta;
void *drv = phl_to_drvpriv(phl_info);
u16 cnt;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> _twt_delete_all_sta()\n");
do {
twt_sta = _twt_sta_dequeue(phl_info, sta_queue, &cnt);
if (NULL != twt_sta)
_os_mem_free(drv, twt_sta,
sizeof(struct rtw_twt_sta_info));
} while (twt_sta != NULL);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== _twt_delete_all_sta()\n");
}
/*
 * Delete a twt sta entry matching the specific sta and id from the queue.
 * The queue is scanned by dequeuing entries one by one and re-enqueuing the
 * non-matching ones until the first dequeued entry comes around again.
 * @sta_q: twt sta Q
 * @phl_sta: specific sta
 * @id: specific twt flow id/broadcast twt id, or DELETE_ALL to match any id
 * @cnt: output, the number of sta entries left in the Q
 */
enum rtw_phl_status _twt_delete_sta(struct phl_info_t *phl_info,
struct phl_queue *sta_q,
struct rtw_phl_stainfo_t *phl_sta, u8 id, u16 *cnt)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_twt_sta_info *twt_sta, *f_sta;
void *drv = phl_to_drvpriv(phl_info);
f_sta = _twt_sta_dequeue(phl_info, sta_q, cnt);
twt_sta = f_sta;
do {
if (twt_sta == NULL)
break;
if ((phl_sta == twt_sta->phl_sta) &&
(DELETE_ALL == id || id == twt_sta->id)) {
_os_mem_free(drv, twt_sta,
sizeof(struct rtw_twt_sta_info));
pstatus = RTW_PHL_STATUS_SUCCESS;
break;
}
_twt_sta_enqueue(phl_info, sta_q, twt_sta);
twt_sta = _twt_sta_dequeue(phl_info, sta_q, cnt);
if (NULL != twt_sta && twt_sta == f_sta) {
_twt_sta_enqueue(phl_info, sta_q, twt_sta);
break;
}
} while (true);
return pstatus;
}
/*
 * Check whether a twt sta entry matching the specific sta and id exists in the queue
 * @sta_q: twt sta Q
 * @phl_sta: specific sta
 * @id: specific twt flow id/broadcast twt id, or DELETE_ALL to match any id
 */
enum rtw_phl_status _twt_sta_exist(struct phl_info_t *phl_info,
struct phl_queue *sta_q,
struct rtw_phl_stainfo_t *phl_sta, u8 id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_twt_sta_info *twt_sta = NULL;
void *drv = phl_to_drvpriv(phl_info);
_os_list *q_list = &sta_q->queue;
_os_spinlock(drv, &sta_q->lock, _bh, NULL);
phl_list_for_loop(twt_sta, struct rtw_twt_sta_info, q_list, list) {
if (NULL == twt_sta)
break;
if ((phl_sta == twt_sta->phl_sta) &&
(DELETE_ALL == id || id == twt_sta->id)) {
pstatus = RTW_PHL_STATUS_SUCCESS;
break;
}
}
_os_spinunlock(drv, &sta_q->lock, _bh, NULL);
return pstatus;
}
enum rtw_phl_status _twt_add_sta(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *phl_sta,
struct phl_queue *sta_q, u8 id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_twt_sta_info *twt_sta;
void *drv = phl_to_drvpriv(phl_info);
twt_sta = _os_mem_alloc(drv, sizeof(struct rtw_twt_sta_info));
if (NULL == twt_sta) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "_phl_twt_add_sta(): alloc rtw_twt_sta_info failed\n");
} else {
twt_sta->phl_sta = phl_sta;
twt_sta->id = id;
_twt_sta_enqueue(phl_info, sta_q, twt_sta);
pstatus = RTW_PHL_STATUS_SUCCESS;
}
return pstatus;
}
enum rtw_phl_status _twt_delete_sta_info(struct phl_info_t *phl_info,
struct rtw_phl_stainfo_t *phl_sta,
u8 ignore_type, enum rtw_phl_nego_type nego_type,
u8 id, u8 *bitmap)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_twt_info *phl_twt_info = get_twt_info(phl_info);
struct phl_twt_cfg_info *twt_cfg_i = &phl_twt_info->twt_cfg_info;
struct phl_twt_config *config = NULL, *f_config = NULL;
enum rtw_phl_nego_type type = nego_type;
bool delete_error = false;
u16 cnt;
u8 delete_id = ignore_type ? DELETE_ALL : id;
*bitmap = 0;
if (RTW_PHL_MANAGE_BCAST_TWT == nego_type)
type = RTW_PHL_BCAST_TWT;
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl_info, twt_cfg_i,
PHL_GET_HEAD_CONFIG, NULL, &config)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_delete_sta_info(): Fail to get first allocate config\n");
goto exit;
}
f_config = config;
do {
PHL_TRACE(COMP_PHL_TWT, _PHL_DEBUG_, "_twt_delete_sta_info(): while loop, twt_id:%d\n",
config->twt_info.twt_id);
if (twt_config_state_free == config->state)
goto next_cfg;
if (config->role != phl_sta->wrole)
goto next_cfg;
if (false == ignore_type && config->twt_info.nego_type != type)
goto next_cfg;
if (RTW_PHL_STATUS_SUCCESS != _twt_sta_exist(phl_info,
&config->twt_sta_queue,
phl_sta, delete_id))
goto next_cfg;
if (RTW_PHL_STATUS_SUCCESS != _twt_sta_update(phl_info->hal,
phl_sta->macid,
config->twt_info.twt_id,
TWT_STA_DEL_MACID)) {
delete_error = true;
goto next_cfg;
}
if (RTW_PHL_STATUS_SUCCESS != _twt_delete_sta(phl_info,
&config->twt_sta_queue,
phl_sta, delete_id,
&cnt)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_delete_sta_info(): Fail to delete sta from twt_sta Q, macid(0x%x), delete_id(%d)\n",
phl_sta->macid, delete_id);
delete_error = true;
goto next_cfg;
}
if (0 == cnt)
*bitmap |= (1 << config->twt_info.twt_id);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_delete_sta_info(): Delete sta success, config id = %d, twt_sta_queue cnt:%d, bitmap:0x%X\n",
config->twt_info.twt_id, cnt, *bitmap);
if (DELETE_ALL != delete_id)
break;
next_cfg:
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl_info,
twt_cfg_i, PHL_GET_NEXT_CONFIG,
(u8 *)&config->idx, &config)) {
delete_error = true;
break;
}
} while (config != f_config);
if (false == delete_error)
pstatus = RTW_PHL_STATUS_SUCCESS;
else
pstatus = RTW_PHL_STATUS_FAILURE;
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_delete_sta_info(): pstatus:%d, nego_type = %d, id:%d, bitmap:0x%x\n",
pstatus, nego_type, id, *bitmap);
return pstatus;
}
enum rtw_phl_status _twt_info_update(struct phl_info_t *phl_info,
struct phl_twt_config *config,
enum phl_twt_action action)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
enum rtw_phl_twt_cfg_action config_action;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> _twt_info_update()\n");
if (PHL_TWT_ACTION_ENABLE == action) {
config_action = TWT_CFG_ADD;
} else if (PHL_TWT_ACTION_DISABLE == action) {
config_action = TWT_CFG_DELETE;
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_info_update(): Unexpected action:%d\n",
action);
goto exit;
}
hstatus = rtw_hal_twt_info_update(phl_info, config->twt_info, config->role,
config_action);
if (hstatus == RTW_HAL_STATUS_SUCCESS) {
_twt_transfer_config_state(action, &config->state);
pstatus = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "_twt_info_update(): update ok\n");
} else {
_twt_transfer_config_state(PHL_TWT_ACTION_UP_ERROR,
&config->state);
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_info_update(): update fail, hstatus:%d\n",
hstatus);
}
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<==_twt_info_update(): hstatus:%d, twt_id:%d, action:%d\n",
pstatus, config->twt_info.twt_id, action);
return pstatus;
}
/*
void
_twt_free_config(
void *phl,
struct phl_twt_config *config
)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
_twt_info_update(phl_info->hal, config, PHL_TWT_ACTION_DISABLE);
_twt_delete_all_sta(phl_info, &config->twt_sta_queue);
_twt_reset_config_info(config);
_twt_transfer_config_state(PHL_TWT_ACTION_FREE, &config->state);
}
*/
bool _twt_exist_same_twt_config(struct phl_info_t *phl,
struct phl_twt_cfg_info *twt_cfg_i, struct rtw_wifi_role_t *role,
struct rtw_phl_twt_setup_info setup_info,
struct phl_twt_config **ret_config)
{
bool exist = false;
struct _twt_compare compare_info = {0};
struct phl_twt_config *config = NULL;
compare_info.role = role;
compare_info.twt_setup = setup_info;
if (RTW_PHL_STATUS_SUCCESS == _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_CONFIG_BY_PARA, (u8 *)&compare_info, &config)) {
*ret_config = config;
exist = true;
}
return exist;
}
/*
u8
_twt_get_new_config_entry(
struct phl_info_t *phl,
struct phl_queue *twt_queue,
struct phl_twt_config **ret_config
)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_twt_config *config = NULL;
u8 bget = false;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> _twt_get_new_config_entry()\n");
if (RTW_PHL_STATUS_SUCCESS == _twt_operate_twt_config(phl, twt_queue,
PHL_GET_NEW_CONFIG, NULL, &config)) {
*ret_config = config;
bget = true;
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== _twt_get_new_config_entry(): bget:%d\n",
bget);
return bget;
}
*/
bool _twt_new_config_is_available(struct phl_info_t *phl_i)
{
struct phl_twt_info *phl_twt_info = get_twt_info(phl_i);
struct phl_twt_cfg_info *twt_cfg_i = &phl_twt_info->twt_cfg_info;
struct phl_twt_config *config = NULL;
u8 available = false;
if (RTW_PHL_STATUS_SUCCESS == _twt_operate_twt_config(phl_i, twt_cfg_i,
PHL_GET_NEW_CONFIG, NULL, &config)) {
_twt_operate_twt_config(phl_i, twt_cfg_i, PHL_FREE_CONFIG,
&config->twt_info.twt_id, NULL);
available = true;
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_new_config_is_available(): bavailable:%d\n", available);
return available;
}
/*
 * Whether the twt flow id of the sta exists in any twt config entry.
 * @phl_sta: the specific sta
 * @role: the role used to search the twt config entries
 * @id: twt flow id
 * Note: for sta mode.
 */
u8 _twt_flow_id_exist(void *phl, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_wifi_role_t *role, u8 id)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = get_twt_info(phl_info);
struct phl_twt_cfg_info *twt_cfg_i = &phl_twt_info->twt_cfg_info;
struct phl_twt_config *config = NULL, *f_config = NULL;
struct rtw_twt_sta_info *twt_sta = NULL;
bool exist = false;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> _twt_flow_id_exist()\n");
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_HEAD_CONFIG, NULL, &config)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_flow_id_exist(): Fail to get first allocate config\n");
goto exit;
}
f_config = config;
do {
PHL_TRACE(COMP_PHL_TWT, _PHL_DEBUG_, "_twt_flow_id_exist(): while loop\n");
if (twt_config_state_free == config->state)
goto next_cfg;
if (config->role != phl_sta->wrole ||
RTW_PHL_INDIV_TWT != config->twt_info.nego_type)
goto next_cfg;
twt_sta = _twt_get_sta_info(phl_info, &config->twt_sta_queue,
phl_sta);
if (NULL != twt_sta && id == twt_sta->id) {
exist = true;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_flow_id_exist(): exist the twt_flow_id:%d\n",
id);
break;
}
next_cfg:
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl,
twt_cfg_i, PHL_GET_NEXT_CONFIG,
(u8 *)&config->idx, &config))
break;
} while(config != f_config);
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== _twt_flow_id_exist(): twt flow id:%d, bexist:%d\n",
id, exist);
return exist;
}
enum rtw_phl_status _twt_accept_bcast_by_sta(struct phl_info_t *phl,
struct rtw_phl_twt_setup_info *setup_info,
struct rtw_phl_stainfo_t *phl_sta, u8 *config_id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
/*TODO*/
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "==> _twt_accept_bcast_by_sta(): not support, todo\n");
pstatus = RTW_PHL_STATUS_FAILURE;
return pstatus;
}
enum rtw_phl_status _twt_accept_indiv_by_sta(struct phl_info_t *phl,
struct rtw_phl_twt_setup_info *setup_info,
struct rtw_phl_stainfo_t *phl_sta, u8 *config_id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_twt_element *twt_ele = &setup_info->twt_element;
struct rtw_phl_twt_control *twt_ctrl = &twt_ele->twt_ctrl;
struct rtw_phl_indiv_twt_para_set *para = &twt_ele->info.i_twt_para_set;
struct rtw_phl_req_type_indiv *req_type = ¶->req_type;
u8 bitmap = 0, id = 0, i = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> _twt_accept_indiv_by_sta()\n");
if (_twt_flow_id_exist(phl, phl_sta, phl_sta->wrole,
req_type->twt_flow_id)) {
pstatus = _twt_delete_sta_info(phl, phl_sta, false,
twt_ctrl->nego_type,
req_type->twt_flow_id, &bitmap);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_accept_indiv_by_sta(): twt flow id(%d) exist, first, delete twt sta, pstatus:%d, bitmap:0x%x\n",
req_type->twt_flow_id, pstatus, bitmap);
if (RTW_PHL_STATUS_SUCCESS == pstatus && bitmap != 0) {
id = 0;
do {
i = ((bitmap >> id) & BIT0);
if (i != 0) {
bitmap &= ~(BIT(id));
break;
}
id++;
} while (true);
pstatus = rtw_phl_twt_free_twt_config(phl, id);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_accept_indiv_by_sta():sta Q is empty in twt config entry(%d), we free it, pstatus:%d \n",
id, pstatus);
if (bitmap !=0) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "_twt_accept_indiv_by_sta(): TWT config entry bitmap(0x%x) != 0, some twt config entry not free. please check code\n",
bitmap);
}
}
}
pstatus = rtw_phl_twt_alloc_twt_config(phl, phl_sta->wrole, *setup_info,
true, &id);
if (RTW_PHL_STATUS_SUCCESS == pstatus) {
pstatus = rtw_phl_twt_add_sta_info(phl, phl_sta, id,
req_type->twt_flow_id);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
rtw_phl_twt_free_twt_config(phl, id);
} else {
*config_id = id;
}
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "_twt_accept_indiv_by_sta(): pstatus:%d, config_id:%d\n",
pstatus, *config_id);
return pstatus;
}
/*
* Initialize twt
*/
enum rtw_phl_status phl_twt_init(void *phl)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv = phl_to_drvpriv(phl_info);
struct phl_twt_info *phl_twt_i = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL;
u16 len = 0;
u8 i = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> phl_twt_init()\n");
if (NULL == phl_info->phl_twt_info) {
phl_info->phl_twt_info = _os_mem_alloc(drv,
sizeof(struct phl_twt_info));
if (NULL == phl_info->phl_twt_info) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "phl_twt_init(): Failed to allocate phl_twt_info\n");
goto exit;
}
_os_mem_set(phl_to_drvpriv(phl_info), phl_info->phl_twt_info,
0, sizeof(struct phl_twt_info));
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "phl_twt_init(): Duplicate init1, please check code\n");
}
phl_twt_i = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_i->twt_cfg_info;
if (NULL == twt_cfg_i->twt_cfg_ring) {
twt_cfg_i->twt_cfg_num = MAX_NUM_HW_TWT_CONFIG;
len = sizeof(struct phl_twt_config) * twt_cfg_i->twt_cfg_num;
twt_cfg_i->twt_cfg_ring = _os_mem_alloc(drv, len);
if (NULL == twt_cfg_i->twt_cfg_ring) {
twt_cfg_i->twt_cfg_num = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "phl_twt_init(): Failed to allocate twt_cfg_ring\n");
goto exit;
}
config = (struct phl_twt_config *)twt_cfg_i->twt_cfg_ring;
for (i = 0; i < twt_cfg_i->twt_cfg_num; i++) {
_os_mem_set(phl_to_drvpriv(phl_info), config, 0,
sizeof(struct phl_twt_config));
config->idx = i;
pq_init(drv, &config->twt_sta_queue);
config++;
}
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "phl_twt_init(): Duplicate init2, please check code\n");
}
/* init for twt_annc_queue */
pq_init(drv, &phl_twt_i->twt_annc_queue);
pstatus = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "<== phl_twt_init(): pstatus:%d\n",
pstatus);
return pstatus;
}
/*
* Deinitialize twt
*/
void phl_twt_deinit(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv = phl_to_drvpriv(phl_info);
struct phl_twt_info *phl_twt_i = get_twt_info(phl_info);
struct phl_twt_config *config = NULL;
struct _twt_announce_info *annc_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
u8 i = 0;
u16 len = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> phl_twt_deinit()\n");
if (NULL == phl_twt_i)
goto exit;
twt_cfg_i = &phl_twt_i->twt_cfg_info;
if (NULL == twt_cfg_i->twt_cfg_ring)
goto free_twt_info;
config = (struct phl_twt_config *)(twt_cfg_i->twt_cfg_ring);
for (i = 0; i < twt_cfg_i->twt_cfg_num; i++) {
if (config->twt_sta_queue.cnt > 0) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "rtw_phl_twt_deinit(): config_id: %d, twt_sta_queue.cnt(%d) >0, force delete all\n",
config->idx ,config->twt_sta_queue.cnt);
_twt_delete_all_sta(phl_info, &config->twt_sta_queue);
}
pq_deinit(drv, &config->twt_sta_queue);
config++;
}
len = sizeof(struct phl_twt_config) * twt_cfg_i->twt_cfg_num;
_os_mem_free(drv, twt_cfg_i->twt_cfg_ring, len);
do {
annc_info = _twt_announce_info_dequeue(phl_info,
&phl_twt_i->twt_annc_queue);
if (NULL == annc_info)
break;
_os_mem_free(drv, annc_info, sizeof(struct _twt_announce_info));
} while(true);
pq_deinit(drv, &phl_twt_i->twt_annc_queue);
free_twt_info:
_os_mem_free(drv, phl_info->phl_twt_info, sizeof(struct phl_twt_info));
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_deinit()\n");
}
/*
 * Allocate a new twt config
 * @role: the user of the twt config
 * @setup_info: twt setup info
 * @benable: whether to enable the twt config in fw;
 * if benable is false, only the twt config entry is allocated
 * @id: Output the id of the twt config entry
 */
enum rtw_phl_status rtw_phl_twt_alloc_twt_config(void *phl,
struct rtw_wifi_role_t *role,
struct rtw_phl_twt_setup_info setup_info,
u8 benable, u8 *id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL;
bool alloc = false;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_alloc_twt_config()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_alloc_twt_config(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_alloc_twt_config(): twt_init == false\n");
return pstatus;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
/* if (true == _twt_exist_same_twt_config(phl_info, twt_cfg_i, role,
setup_info, &config)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "[TWT]alloc from existing config, id = %d\n",
config->twt_info.twt_id);
alloc = true;
} else */{
if (RTW_PHL_STATUS_SUCCESS == _twt_operate_twt_config(phl_info,
twt_cfg_i, PHL_GET_NEW_CONFIG, NULL, &config)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "[TWT]alloc from new config, id = %d\n",
config->twt_info.twt_id);
alloc = true;
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "[TWT]fail to alloc new config\n");
pstatus = RTW_PHL_STATUS_RESOURCE;
}
}
if (true == alloc) {
*id = config->twt_info.twt_id;
config->role = role;
_twt_fill_config_info(&config->twt_info, &setup_info);
if (benable) {
pstatus = _twt_info_update(phl_info->hal, config,
PHL_TWT_ACTION_ENABLE);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
/*todo*/
}
} else {
pstatus = RTW_PHL_STATUS_SUCCESS;
}
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_alloc_twt_config(): pstatus:%d\n",
pstatus);
return pstatus;
}
/*
* Free twt config entry by specific config ID
* @id: id of twt config entry
*/
enum rtw_phl_status rtw_phl_twt_free_twt_config(void *phl, u8 id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_free_twt_config()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_free_twt_config(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_free_twt_config(): twt_init == false\n");
return pstatus;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
if (RTW_PHL_STATUS_SUCCESS == _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_CONFIG_BY_ID,
&id, &config)) {
_twt_info_update(phl_info->hal, config, PHL_TWT_ACTION_DISABLE);
_twt_delete_all_sta(phl_info, &config->twt_sta_queue);
_twt_operate_twt_config(phl, twt_cfg_i, PHL_FREE_CONFIG, &id,
NULL);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "[TWT]Free twt config success, id = %d\n",
id);
pstatus = RTW_PHL_STATUS_SUCCESS;
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_free_twt_config(): pstatus:%d\n",
pstatus);
return pstatus;
}
/*
 * Enable twt config by specific config id
 * @id: id of the twt config entry
 */
enum rtw_phl_status rtw_phl_twt_enable_twt_config(void *phl, u8 id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_enable_twt_config()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_enable_twt_config(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_enable_twt_config(): twt_init == false\n");
return pstatus;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
if (RTW_PHL_STATUS_SUCCESS == _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_CONFIG_BY_ID, &id, &config)) {
pstatus = _twt_info_update(phl_info->hal, config,
PHL_TWT_ACTION_ENABLE);
if (RTW_PHL_STATUS_SUCCESS == pstatus) {
_twt_all_sta_update(phl_info, id, &config->twt_sta_queue,
TWT_STA_ADD_MACID);
}
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_enable_twt_config(): pstatus:%d, id = %d\n",
pstatus, id);
return pstatus;
}
/*
 * Free all twt configs of the specific role
 * @role: specific role for searching twt config entries
 */
enum rtw_phl_status rtw_phl_twt_free_all_twt_by_role(void *phl,
struct rtw_wifi_role_t *role)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config;
u8 id;
bool free = false;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_free_all_twt_by_role()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_free_all_twt_by_role(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_free_all_twt_by_role(): twt_init == false\n");
return pstatus;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
do {
pstatus = _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_CONFIG_BY_ROLE, (u8 *)role, &config);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
break;
id = config->twt_info.twt_id;
_twt_info_update(phl_info->hal, config, PHL_TWT_ACTION_DISABLE);
_twt_delete_all_sta(phl_info, &config->twt_sta_queue);
_twt_operate_twt_config(phl, twt_cfg_i, PHL_FREE_CONFIG, &id,
NULL);
free = true;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "Free twt config success, id = %d\n",
id);
} while(true);
if (true == free)
pstatus = RTW_PHL_STATUS_SUCCESS;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_free_all_twt_by_role(): pstatus:%d\n",
pstatus);
return pstatus;
}
/*
 * Disable (pause) all twt configs of the specific role
 * @role: specific role for searching twt config entries
 */
enum rtw_phl_status rtw_phl_twt_disable_all_twt_by_role(void *phl,
struct rtw_wifi_role_t *role)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL, *f_config = NULL;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_disable_all_twt_by_role()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_disable_all_twt_by_role(): twt_sup == false\n");
goto exit;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_disable_all_twt_by_role(): twt_init == false\n");
goto exit;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_HEAD_CONFIG, NULL, &config)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_disable_all_twt_by_role(): Fail to get first allocate config\n");
goto exit;
}
f_config = config;
do {
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_disable_all_twt_by_role(): while loop, twt_id:%d\n",
config->twt_info.twt_id);
if (twt_config_state_free == config->state)
goto next_cfg;
if (config->role == role)
_twt_info_update(phl_info->hal, config,
PHL_TWT_ACTION_DISABLE);
next_cfg:
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl,
twt_cfg_i, PHL_GET_NEXT_CONFIG,
(u8 *)&config->idx, &config))
goto exit;
} while(config != f_config);
pstatus = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_disable_all_twt_by_role(): pstatus:%d\n",
pstatus);
return pstatus;
}
/*
 * Enable all twt configs of the specific role
 * @role: specific role for searching twt config entries
 */
enum rtw_phl_status rtw_phl_twt_enable_all_twt_by_role(void *phl,
struct rtw_wifi_role_t *role)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL, *f_config = NULL;
bool error = false;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_enable_all_twt_by_role()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_enable_all_twt_by_role(): twt_sup == false\n");
goto exit;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_enable_all_twt_by_role(): twt_init == false\n");
goto exit;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_HEAD_CONFIG, NULL, &config)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_enable_all_twt_by_role(): Fail to get first allocate config\n");
goto exit;
}
f_config = config;
do {
PHL_TRACE(COMP_PHL_TWT, _PHL_DEBUG_, "rtw_phl_twt_enable_all_twt_by_role(): while loop, twt_id:%d\n",
config->twt_info.twt_id);
if (twt_config_state_free == config->state)
goto next_cfg;
if (config->role != role)
goto next_cfg;
pstatus = _twt_info_update(phl_info->hal, config,
PHL_TWT_ACTION_ENABLE);
if (RTW_PHL_STATUS_SUCCESS == pstatus) {
pstatus = _twt_all_sta_update(phl_info,
config->twt_info.twt_id,
&config->twt_sta_queue,
TWT_STA_ADD_MACID);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
error = true;
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "rtw_phl_twt_enable_all_twt_by_role(): Fail to update all twt sta, twt_id:%d\n",
config->twt_info.twt_id);
}
} else {
error = true;
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "rtw_phl_twt_enable_all_twt_by_role(): Fail to enable twt config, twt_id:%d\n",
config->twt_info.twt_id);
}
next_cfg:
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl,
twt_cfg_i, PHL_GET_NEXT_CONFIG,
(u8 *)&config->idx, &config)) {
error = true;
break;
}
} while(config != f_config);
if (true == error)
pstatus = RTW_PHL_STATUS_FAILURE;
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_enable_all_twt_by_role(): pstatus:%d\n",
pstatus);
return pstatus;
}
/*
 * Add a twt sta to a specific twt config entry
 * @phl_sta: sta entry that you want to add to the specific twt config entry
 * @config_id: id of the target twt config entry
 * @id: twt flow id / broadcast twt id
 */
enum rtw_phl_status rtw_phl_twt_add_sta_info(void *phl,
struct rtw_phl_stainfo_t *phl_sta, u8 config_id, u8 id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL;
struct phl_queue *sta_q = NULL;
u16 cnt = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_add_sta_info()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_add_sta_info(): twt_sup == false\n");
goto fail;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_add_sta_info(): twt_init == false\n");
goto fail;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_CONFIG_BY_ID,
&config_id, &config)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "Fail to get TWT config by id(%d)\n",
config_id);
goto fail;
}
sta_q = &config->twt_sta_queue;
if (RTW_PHL_STATUS_SUCCESS != _twt_add_sta(phl, phl_sta, sta_q, id)) {
goto fail;
}
if (RTW_PHL_STATUS_SUCCESS != _twt_sta_update(phl_info->hal,
phl_sta->macid, config_id, TWT_STA_ADD_MACID)) {
_twt_delete_sta(phl, sta_q, phl_sta, id, &cnt);
goto fail;
}
pstatus = RTW_PHL_STATUS_SUCCESS;
fail:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_add_sta_info(): pstatus:%d\n",
pstatus);
return pstatus;
}
/*
 * Remove the twt sta from every twt config entry it belongs to
 * @phl_sta: sta entry that you want to remove
 * @bitmap: Output bitmap. Each set bit marks a twt config entry that holds
 * no twt sta anymore after the removal.
 * ex: Bitmap=0x0A (bits 1 and 3 set): after the removal, config entries 1
 * and 3 hold no twt sta anymore.
 * ex: Bitmap=0: every config entry the twt sta was removed from still holds
 * at least one other twt sta.
 */
enum rtw_phl_status rtw_phl_twt_delete_all_sta_info(void *phl,
struct rtw_phl_stainfo_t *phl_sta, u8 *bitmap)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_delete_all_sta_info()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_delete_all_sta_info(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_delete_all_sta_info(): twt_init == false\n");
return pstatus;
}
pstatus = _twt_delete_sta_info(phl, phl_sta, true, 0, 0, bitmap);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_delete_all_sta_info(): pstatus:%d, bitmap:0x%x\n",
pstatus, *bitmap);
return pstatus;
}
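#if 0
/*
 * Illustrative caller sketch (not built into the driver): how the bitmap
 * returned above is meant to be consumed. Each set bit names a twt config
 * entry whose sta queue became empty, so the caller may release it; this
 * mirrors what rtw_phl_twt_teardown_for_sta_mode() does further below.
 * The helper name is hypothetical.
 */
static void _example_release_empty_twt_cfgs(void *phl, u8 bitmap)
{
	u8 i;

	for (i = 0; i < MAX_NUM_HW_TWT_CONFIG; i++) {
		if (bitmap & BIT(i))
			rtw_phl_twt_free_twt_config(phl, i);
	}
}
#endif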
/*
 * Remove the twt sta when a twt teardown frame is transmitted/received
 * @phl_sta: sta entry that you want to remove
 * @twt_flow: twt flow field info
 * @bitmap: Output bitmap. Each set bit marks a twt config entry that holds
 * no twt sta anymore after the removal.
 * ex: Bitmap=0x0A (bits 1 and 3 set): after the removal, config entries 1
 * and 3 hold no twt sta anymore.
 * ex: Bitmap=0: every config entry the twt sta was removed from still holds
 * at least one other twt sta.
 */
enum rtw_phl_status rtw_phl_twt_teardown_sta(void *phl,
struct rtw_phl_stainfo_t *phl_sta,
struct rtw_phl_twt_flow_field *twt_flow, u8 *bitmap)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
u8 id = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_teardown_sta()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_teardown_sta(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_teardown_sta(): twt_init == false\n");
return pstatus;
}
if (RTW_PHL_INDIV_TWT == twt_flow->nego_type ||
RTW_PHL_WAKE_TBTT_INR == twt_flow->nego_type) {
if (twt_flow->info.twt_flow01.teardown_all)
id = DELETE_ALL;
else
id = twt_flow->info.twt_flow01.twt_flow_id;
pstatus = RTW_PHL_STATUS_SUCCESS;
} else if (RTW_PHL_BCAST_TWT == twt_flow->nego_type ) {
/*Todo*/
} else if (RTW_PHL_MANAGE_BCAST_TWT == twt_flow->nego_type) {
if (twt_flow->info.twt_flow3.teardown_all)
id = DELETE_ALL;
else
id = twt_flow->info.twt_flow3.bcast_twt_id;
pstatus = RTW_PHL_STATUS_SUCCESS;
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "rtw_phl_twt_teardown_sta(): Unknown nego_type:%d\n",
twt_flow->nego_type);
}
if (RTW_PHL_STATUS_SUCCESS == pstatus) {
pstatus = _twt_delete_sta_info(phl, phl_sta, false,
twt_flow->nego_type, id, bitmap);
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_teardown_sta(): pstatus:%d, twt_flow->nego_type:%d, id:%d, bitmap:0x%x\n",
pstatus, twt_flow->nego_type, id, *bitmap);
return pstatus;
}
/*
 * Assign a new flow id for the twt setup of the sta.
 * @phl_sta: the specific sta; its wrole is used to search the twt config entries
 * @id: Output: twt flow id
 * Note: for sta mode.
 */
enum rtw_phl_status rtw_phl_twt_get_new_flow_id(void *phl,
struct rtw_phl_stainfo_t *phl_sta, u8 *id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_RESOURCE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL, *f_config = NULL;
struct rtw_twt_sta_info *twt_sta = NULL;
u8 use_map = 0, unuse_map = 0;
u8 i = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_get_new_flow_id()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_get_new_flow_id(): twt_sup == false\n");
goto exit;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_get_new_flow_id(): twt_init == false\n");
goto exit;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
if (false == _twt_new_config_is_available(phl_info))
goto exit;
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_HEAD_CONFIG, NULL, &config)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_get_new_flow_id(): Fail to get first allocate config\n");
goto exit;
}
f_config = config;
do {
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_get_new_flow_id(): while loop\n");
if (twt_config_state_free == config->state)
goto next_cfg;
if (config->role != phl_sta->wrole ||
RTW_PHL_INDIV_TWT != config->twt_info.nego_type)
goto next_cfg;
twt_sta = _twt_get_sta_info(phl_info, &config->twt_sta_queue,
phl_sta);
if (NULL != twt_sta) {
use_map |= (1 << twt_sta->id);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_get_new_flow_id(): config_ID:%d, get match sta, twt_sta->id:%d\n",
config->twt_info.twt_id, twt_sta->id);
}
next_cfg:
if (RTW_PHL_STATUS_SUCCESS != _twt_operate_twt_config(phl,
twt_cfg_i, PHL_GET_NEXT_CONFIG,
(u8 *)&config->idx, &config))
goto exit;
} while(config != f_config);
unuse_map = (~use_map) & 0xFF;
i = 0;
while ((unuse_map >> i) > 0) {
if ((unuse_map >> i) & BIT0) {
*id = i;
pstatus = RTW_PHL_STATUS_SUCCESS;
break;
}
i++;
}
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_get_new_flow_id(): pstatus:%d, use_map:%d, unuse_map:%d, new_flow_id:%d\n",
pstatus, use_map, unuse_map, *id);
return pstatus;
}
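/*
 * Worked example for the flow id search above (hypothetical values): if the
 * sta already holds flow ids 0, 1 and 3, use_map ends up as 0x0B, unuse_map
 * becomes 0xF4, and scanning unuse_map from bit 0 yields the new flow id 2.
 */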
/*
 * Get target wake time
 * @port: port num of the role
 * @id: reference id of the twt configuration, or IGNORE_CFG_ID to take the
 * current TSF plus offset without aligning to an existing agreement
 * @offset: unit: ms. The amount of time from now after which TWT should start
 * @tsf_h: return the high 4-byte value of the target wake time
 * @tsf_l: return the low 4-byte value of the target wake time
 */
enum rtw_phl_status rtw_phl_twt_get_target_wake_time(void *phl,
u8 port, u8 id, u16 offset, u32 *tsf_h, u32 *tsf_l)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_twt_cfg_info *twt_cfg_i = NULL;
struct phl_twt_config *config = NULL;
u32 c_tsf_l = 0, c_tsf_h = 0, intvl = 0;
u64 cur_tsf = 0, tgt_tsf = 0, ref_tsf = 0, dif_tsf = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_get_target_wake_time()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_get_target_wake_time(): twt_sup == false\n");
goto exit;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_get_target_wake_time(): twt_init == false\n");
goto exit;
}
phl_twt_info = get_twt_info(phl_info);
twt_cfg_i = &phl_twt_info->twt_cfg_info;
hstatus = rtw_hal_get_tsf(phl_info->hal, port, &c_tsf_h, &c_tsf_l);
if (RTW_HAL_STATUS_FAILURE == hstatus) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "rtw_phl_twt_get_target_wake_time(): Fail to get tsf, hstatus:%d, port:%d\n",
hstatus, port);
goto exit;
}
cur_tsf = c_tsf_h;
cur_tsf = cur_tsf << 32;
cur_tsf |= c_tsf_l;
if (IGNORE_CFG_ID == id) {
tgt_tsf = _os_add64(cur_tsf, offset * 1000);
} else {
pstatus = _twt_operate_twt_config(phl, twt_cfg_i,
PHL_GET_CONFIG_BY_ID, &id, &config);
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "rtw_phl_twt_get_target_wake_time(): Fail to get twt entry by id, pstatus:%d, id:%d\n",
pstatus, id);
goto exit;
}
ref_tsf = config->twt_info.target_wake_time_h;
ref_tsf = ref_tsf << 32;
ref_tsf |= config->twt_info.target_wake_time_l;
intvl = _twt_calc_intvl(config->twt_info.twt_wake_int_exp,
config->twt_info.twt_wake_int_mantissa);
tgt_tsf = _os_add64(cur_tsf, offset * 1000);
dif_tsf = _os_minus64(tgt_tsf, ref_tsf);
tgt_tsf = _os_minus64(tgt_tsf, _os_modular64(dif_tsf, intvl));
tgt_tsf = _os_add64(tgt_tsf, intvl);
}
*tsf_h = (u32)(tgt_tsf >> 32);
*tsf_l = (u32)(tgt_tsf);
pstatus = RTW_PHL_STATUS_SUCCESS;
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_get_target_wake_time(): pstatus(%d), port:%d, twt_id:%d, offset:0x%08x, tsf_h: 0x%08X, tsf_l: 0x%08X\n",
pstatus, port, id, offset, *tsf_h, *tsf_l);
return pstatus;
}
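/*
 * Worked example for the alignment above (hypothetical numbers): with a
 * reference TWT (ref_tsf) of 1,000,000 us, a wake interval (intvl) of
 * 65,536 us, a current TSF of 2,000,000 us and an offset of 10 ms:
 *   tgt = 2,000,000 + 10,000   = 2,010,000
 *   dif = tgt - ref            = 1,010,000
 *   tgt = tgt - (dif % intvl)  = 1,983,040 (= ref + 15 * intvl)
 *   tgt = tgt + intvl          = 2,048,576 (= ref + 16 * intvl)
 * i.e. the returned time is the first service period boundary of the
 * referenced agreement that lies after now + offset.
 */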
/*
 * Fill twt element
 * @twt_ele: twt element info
 * @buf: destination buffer
 * @len: output, the length of the filled twt element
 */
enum rtw_phl_status rtw_phl_twt_fill_twt_element(
struct rtw_phl_twt_element *twt_ele, u8 *buf, u8 *len)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
u8 twt_para_length = 0;
struct rtw_phl_twt_control *twt_ctrl = NULL;
if (twt_ele == NULL || buf == NULL || len == NULL) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_fill_twt_element(): twt_ele or buf or len = NULL\n");
return pstatus;
}
twt_ctrl = &twt_ele->twt_ctrl;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_fill_twt_element(): twt_ctrl: ndp_paging_indic(%d), responder_pm_mode(%d), nego_type(%d), twt_info_frame_disable(%d), wake_dur_unit(%d)\n",
twt_ctrl->ndp_paging_indic, twt_ctrl->responder_pm_mode,
twt_ctrl->nego_type, twt_ctrl->twt_info_frame_disable,
twt_ctrl->wake_dur_unit);
*len = 0;
/*Control filed*/
SET_TWT_CONTROL_NDP_PAGING_INDICATOR(buf, twt_ctrl->ndp_paging_indic);
SET_TWT_CONTROL_RESPONDER_PM_MODE(buf, twt_ctrl->responder_pm_mode);
SET_TWT_CONTROL_NEGOTIATION_TYPE(buf, twt_ctrl->nego_type);
SET_TWT_CONTROL_TWT_INFORMATION_FRAME_DISABLE(buf,
twt_ctrl->twt_info_frame_disable);
SET_TWT_CONTROL_WAKE_DURATION_UNIT(buf, twt_ctrl->wake_dur_unit);
*len += CONTROL_LENGTH;
/*TWT Parameter Information*/
if (RTW_PHL_INDIV_TWT == twt_ctrl->nego_type) {
pstatus = _twt_fill_individual_twt_para_set(
&twt_ele->info.i_twt_para_set,
twt_ctrl->ndp_paging_indic,
buf + *len, &twt_para_length);
*len += twt_para_length;
} else {
/*todo*/
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_fill_twt_element(): not support, todo, twt_ctrl->nego_type(%d)\n",
twt_ctrl->nego_type);
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_fill_twt_element()\n");
return pstatus;
}
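#if 0
/*
 * Illustrative caller sketch (not built into the driver): serializing an
 * individual TWT element body into a frame buffer. The buffer size, helper
 * name and field values are hypothetical.
 */
static void _example_build_twt_element(struct rtw_phl_twt_element *twt_ele)
{
	u8 body[32] = {0}; /* hypothetical scratch buffer */
	u8 ele_len = 0;

	twt_ele->twt_ctrl.nego_type = RTW_PHL_INDIV_TWT;
	if (RTW_PHL_STATUS_SUCCESS ==
	    rtw_phl_twt_fill_twt_element(twt_ele, body, &ele_len))
		PHL_TRACE(COMP_PHL_TWT, _PHL_DEBUG_, "filled %d bytes of control + parameter fields\n",
			  ele_len);
}
#endif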
/*
 * Fill the twt flow field of a TWT teardown frame
 * @twt_flow: twt flow field info
 * @buf: destination buffer
 * @length: output, the length of the filled twt flow field
 */
enum rtw_phl_status rtw_phl_twt_fill_flow_field(
struct rtw_phl_twt_flow_field *twt_flow, u8 *buf, u16 *length)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_fill_flow_field()\n");
if (twt_flow == NULL || buf == NULL || length == NULL) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_fill_flow_field(): twt_flow or buf or length = NULL\n");
return pstatus;
}
*length = 0;
if (RTW_PHL_INDIV_TWT == twt_flow->nego_type ||
RTW_PHL_WAKE_TBTT_INR == twt_flow->nego_type) {
struct rtw_phl_twt_flow_type01 *flow_info =
&twt_flow->info.twt_flow01;
SET_TWT_FLOW_ID(buf, flow_info->twt_flow_id);
SET_NEGOTIATION_TYPE(buf, twt_flow->nego_type);
SET_TEARDOWN_ALL_TWT(buf, flow_info->teardown_all);
*length = TWT_FLOW_FIELD_LENGTH;
pstatus = RTW_PHL_STATUS_SUCCESS;
} else if (RTW_PHL_BCAST_TWT == twt_flow->nego_type) {
SET_NEGOTIATION_TYPE(buf, twt_flow->nego_type);
*length = TWT_FLOW_FIELD_LENGTH;
pstatus = RTW_PHL_STATUS_SUCCESS;
} else if (RTW_PHL_MANAGE_BCAST_TWT == twt_flow->nego_type) {
struct rtw_phl_twt_flow_type3 *flow_info =
&twt_flow->info.twt_flow3;
SET_BROADCAST_TWT_ID(buf, flow_info->bcast_twt_id);
SET_NEGOTIATION_TYPE(buf, twt_flow->nego_type);
SET_TEARDOWN_ALL_TWT(buf, flow_info->teardown_all);
*length = TWT_FLOW_FIELD_LENGTH;
pstatus = RTW_PHL_STATUS_SUCCESS;
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_fill_flow_field(): Unknown type(%d)\n",
twt_flow->nego_type);
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_fill_flow_field()\n");
return pstatus;
}
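#if 0
/*
 * Illustrative caller sketch (not built into the driver): building the
 * one-byte TWT flow field of a teardown frame for an individual agreement.
 * The helper name and field values are hypothetical.
 */
static void _example_build_teardown_flow(u8 *frame_body)
{
	struct rtw_phl_twt_flow_field flow = {0};
	u16 flow_len = 0;

	flow.nego_type = RTW_PHL_INDIV_TWT;
	flow.info.twt_flow01.twt_flow_id = 1;
	flow.info.twt_flow01.teardown_all = false;
	rtw_phl_twt_fill_flow_field(&flow, frame_body, &flow_len);
}
#endif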
/*
 * Parse a twt element from a pkt
 * @twt_ele: the address of the twt element
 * @length: length of the pkt
 * @twt_element: output, the parsed info
 */
enum rtw_phl_status rtw_phl_twt_parse_element(u8 *twt_ele, u16 length,
struct rtw_phl_twt_element *twt_element)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_twt_control *twt_ctrl = NULL;
u8 ele_len = 0, ele_id = 0;
u8 *next_buf = twt_ele;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_parse_element()\n");
if (twt_ele == NULL || twt_element == NULL) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_parse_element(): twt_ele or twt_element = NULL\n");
return pstatus;
}
twt_ctrl = &twt_element->twt_ctrl;
if (length < (MIN_TWT_ELE_LEN + ELEM_ID_LEN + ELEM_LEN_LEN)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "rtw_phl_twt_parse_element(): error buffer length(%d) < %d\n",
length, (MIN_TWT_ELE_LEN + ELEM_ID_LEN + ELEM_LEN_LEN));
goto exit;
}
ele_id = GET_ELE_ID(next_buf);
next_buf += ELEM_ID_LEN;
ele_len = GET_ELE_LEN(next_buf);
next_buf += ELEM_LEN_LEN;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_parse_element(): ele_id:%d, ele_len:%d, length:%d\n",
ele_id, ele_len, length);
if (ele_len < MIN_TWT_ELE_LEN) {
PHL_TRACE(COMP_PHL_TWT, _PHL_WARNING_, "rtw_phl_twt_parse_element(): error ele length(%d) < %d\n",
ele_len, MIN_TWT_ELE_LEN);
goto exit;
}
twt_ctrl->ndp_paging_indic =
GET_TWT_CONTROL_NDP_PAGING_INDICATOR(next_buf);
twt_ctrl->responder_pm_mode =
GET_TWT_CONTROL_RESPONDER_PM_MODE(next_buf);
twt_ctrl->nego_type = GET_TWT_CONTROL_NEGOTIATION_TYPE(next_buf);
twt_ctrl->twt_info_frame_disable =
GET_TWT_CONTROL_TWT_INFORMATION_FRAME_DISABLE(next_buf);
twt_ctrl->wake_dur_unit = GET_TWT_CONTROL_WAKE_DURATION_UNIT(next_buf);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_parse_element(): twt_ctrl: ndp_paging_indic(%d), responder_pm_mode(%d), nego_type(%d), twt_info_frame_disable(%d), wake_dur_unit(%d)\n",
twt_ctrl->ndp_paging_indic, twt_ctrl->responder_pm_mode,
twt_ctrl->nego_type, twt_ctrl->twt_info_frame_disable,
twt_ctrl->wake_dur_unit);
if (RTW_PHL_INDIV_TWT == twt_ctrl->nego_type) {
pstatus = _twt_parse_individual_twt_para(twt_ele, length,
twt_element);
} else {
/*todo*/
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_parse_element(): not support, todo, twt_ctrl->nego_type(%d)\n",
twt_ctrl->nego_type);
}
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_parse_element(): pstatus(%d)\n",
pstatus);
return pstatus;
}
/*
 * Parse twt setup info from a pkt
 * @pkt: the address of the Category field of the twt setup frame
 * @length: length of the pkt
 * @setup_info: output, the parsed info
 */
enum rtw_phl_status rtw_phl_twt_parse_setup_info(u8 *pkt, u16 length,
struct rtw_phl_twt_setup_info *setup_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
u8 *twt_ele = NULL;
u16 ele_length = length - TOKEN_OFFSET - TOKEN_LENGTH;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_parse_setup_info()\n");
if (pkt == NULL || setup_info == NULL) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_parse_setup_info(): pkt or setup_info = NULL\n");
return pstatus;
}
twt_ele = pkt + TOKEN_OFFSET + TOKEN_LENGTH;
setup_info->dialog_token = GET_DIALOG_TOKEN(pkt + TOKEN_OFFSET);
pstatus = rtw_phl_twt_parse_element(twt_ele, ele_length,
&setup_info->twt_element);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_parse_setup_info(): pstatus(%d)\n",
pstatus);
return pstatus;
}
/*
 * Parse the twt flow field from a twt teardown frame
 * @pkt: the address of the twt flow field
 * @length: length of the pkt
 * @twt_flow: output, the parsed info
 */
enum rtw_phl_status rtw_phl_twt_parse_flow_field(u8 *pkt, u16 length,
struct rtw_phl_twt_flow_field *twt_flow)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_parse_flow_field()\n");
if (pkt == NULL || twt_flow == NULL) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_parse_flow_field(): pkt or twt_flow = NULL\n");
return pstatus;
}
twt_flow->nego_type = GET_NEGOTIATION_TYPE(pkt);
if (RTW_PHL_INDIV_TWT == twt_flow->nego_type ||
RTW_PHL_WAKE_TBTT_INR == twt_flow->nego_type) {
struct rtw_phl_twt_flow_type01 *flow_info =
&twt_flow->info.twt_flow01;
flow_info->twt_flow_id = GET_TWT_FLOW_ID(pkt);
flow_info->teardown_all = GET_TEARDOWN_ALL_TWT(pkt);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_parse_flow_field(): nego_type:%d, twt_flow_id:%d, teardown_all:%d\n",
twt_flow->nego_type, flow_info->twt_flow_id,
flow_info->teardown_all);
pstatus = RTW_PHL_STATUS_SUCCESS;
} else if (RTW_PHL_BCAST_TWT == twt_flow->nego_type) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_parse_flow_field(): not support, todo, twt_flow->nego_type(%d)\n",
twt_flow->nego_type);
} else if (RTW_PHL_MANAGE_BCAST_TWT == twt_flow->nego_type) {
struct rtw_phl_twt_flow_type3 *flow_info =
&twt_flow->info.twt_flow3;
flow_info->bcast_twt_id = GET_BROADCAST_TWT_ID(pkt);
flow_info->teardown_all = GET_TEARDOWN_ALL_TWT(pkt);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_parse_flow_field(): nego_type:%d, bcast_twt_id:%d, teardown_all:%d\n",
twt_flow->nego_type, flow_info->bcast_twt_id,
flow_info->teardown_all);
pstatus = RTW_PHL_STATUS_SUCCESS;
} else {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_parse_flow_field(): Unknown type, twt_flow->nego_type(%d)\n",
twt_flow->nego_type);
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_parse_flow_field(): pstatus(%d)\n",
pstatus);
return pstatus;
}
/*
 * Tell fw which macid has announced that it is awake
 * @macid: macid of the sta that is in the awake state
 */
enum rtw_phl_status rtw_phl_twt_sta_announce_to_fw(void *phl,
u16 macid)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_twt_info *phl_twt_info = NULL;
struct phl_queue *annc_q = NULL;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_sta_announce_to_fw()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_sta_announce_to_fw(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_sta_announce_to_fw(): twt_init == false\n");
return pstatus;
}
phl_twt_info = get_twt_info(phl_info);
annc_q = &phl_twt_info->twt_annc_queue;
pstatus = _twt_sta_announce(phl_info, annc_q, macid);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_sta_announce_to_fw(): pstatus:%d, macid: %d\n",
pstatus, macid);
return pstatus;
}
#if 0
/*
* Handle twt c2h
* @c: c2h content
*/
enum rtw_phl_status rtw_phl_twt_handle_c2h(void *phl_com, void *c)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_com_t *phl_com_info= (struct rtw_phl_com_t *)phl_com;
struct phl_info_t *phl_info = (struct phl_info_t*)phl_com_info->phl_priv;
struct rtw_c2h_info *c2h = (struct rtw_c2h_info *)c;
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_handle_c2h(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_handle_c2h(): twt_init == false\n");
return pstatus;
}
if (C2H_FUN_WAIT_ANNC == c2h->c2h_func) {
pstatus = _twt_handle_c2h_wait_annc(phl_info, c2h->content);
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_handle_c2h(): pstatus:%d cat:%d, class:%d, func:%d, len:%d, content:0x%x\n",
pstatus, c2h->c2h_cat, c2h->c2h_class, c2h->c2h_func,
c2h->content_len, *(c2h->content));
return pstatus;
}
#endif
/*
 * Handle the sta-side twt configuration when the sta accepts the twt agreement
 * @phl_sta: sta entry that you want to configure twt for
 * @setup_info: twt setup info
 * @id: Output the id of the twt config entry
 * Note: for sta mode
 */
enum rtw_phl_status rtw_phl_twt_accept_for_sta_mode(void *phl,
struct rtw_phl_stainfo_t *phl_sta,
struct rtw_phl_twt_setup_info *setup_info, u8 *id)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_twt_element *element = &setup_info->twt_element;
struct rtw_phl_twt_control *twt_ctrl = &element->twt_ctrl;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_accept_for_sta_mode()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_accept_for_sta_mode(): twt_sup == false\n");
return pstatus;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_accept_for_sta_mode(): twt_init == false\n");
return pstatus;
}
if (RTW_PHL_INDIV_TWT == twt_ctrl->nego_type) {
pstatus = _twt_accept_indiv_by_sta(phl_info, setup_info, phl_sta, id);
} else {
pstatus = _twt_accept_bcast_by_sta(phl_info, setup_info, phl_sta, id);
}
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_accept_for_sta_mode(): pstatus:%d, config_id:%d\n",
pstatus, *id);
return pstatus;
}
/*
 * Handle the sta-side twt teardown when the sta transmits/receives a twt teardown frame
 * @phl_sta: sta entry that you want to tear down twt for
 * @twt_flow: twt flow field info
 * Note: for sta mode.
 */
enum rtw_phl_status rtw_phl_twt_teardown_for_sta_mode(void *phl,
struct rtw_phl_stainfo_t *phl_sta,
struct rtw_phl_twt_flow_field *twt_flow)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
u8 bitmap = 0; /* bitmap of twt config entries that became empty */
u8 i = 0;
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "==> rtw_phl_twt_teardown_for_sta_mode()\n");
if (false == twt_sup(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_teardown_for_sta_mode(): twt_sup == false\n");
goto exit;
}
if (false == twt_init(phl_info)) {
PHL_TRACE(COMP_PHL_TWT, _PHL_ERR_, "rtw_phl_twt_teardown_for_sta_mode(): twt_init == false\n");
goto exit;
}
pstatus = rtw_phl_twt_teardown_sta(phl, phl_sta, twt_flow, &bitmap);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
goto exit;
if (bitmap == 0)
goto exit;
i = 0;
do {
if (((bitmap >> i) & BIT0)) {
pstatus = rtw_phl_twt_free_twt_config(phl, i);
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "rtw_phl_twt_teardown_for_sta_mode():sta Q is empty in twt config entry(%d), we free it, pstatus:%d \n",
i, pstatus);
}
i++;
} while ((bitmap >> i) != 0);
exit:
PHL_TRACE(COMP_PHL_TWT, _PHL_INFO_, "<== rtw_phl_twt_teardown_for_sta_mode(): pstatus(%d)\n",
pstatus);
return pstatus;
}
#endif /* CONFIG_PHL_TWT */
|
2301_81045437/rtl8852be
|
phl/phl_twt.c
|
C
|
agpl-3.0
| 77,176
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_TWT_H_
#define _PHL_TWT_H_
#ifdef CONFIG_PHL_TWT
#define MIN_TWT_ELE_LEN 15
#define MAX_NUM_HW_TWT_CONFIG 4
#define CONTROL_LENGTH 1
#define REQUEST_TYPE_LENGTH 2
#define NOMINAL_MINIMUM_TWT_WAKE_DURATION_LENGTH 1
#define TARGET_WAKE_TIME_LENGTH 8
#define NOMINAL_MIN_TWT_WAKE_DURATION_LENGTH 1
#define TWT_WAKE_INTERVAL_MANTISSA_LENGTH 2
#define TWT_CHANNEL_LENGTH 1
#define TWT_FLOW_FIELD_LENGTH 1
#define TOKEN_OFFSET 2
#define TOKEN_LENGTH 1
#define ELEM_ID_LEN 1 /* Length of element id*/
#define ELEM_LEN_LEN 1 /* Length of element Length*/
#define C2H_FUN_WAIT_ANNC 0x0
#define get_twt_info(_phl) (struct phl_twt_info *)(_phl->phl_twt_info)
#define set_config_state(_state, set_state) (_state = set_state)
#define twt_init(_phl) (NULL != (get_twt_info(_phl)))
#define twt_sup(_phl) (((struct phl_info_t *)_phl)->phl_com->dev_cap.twt_sup)
enum phl_operate_config_type {
PHL_OP_TYPE_NONE = 0,
PHL_GET_NEW_CONFIG,
PHL_FREE_CONFIG,
PHL_GET_CONFIG_BY_ID,
PHL_GET_CONFIG_BY_ROLE,
PHL_GET_CONFIG_BY_PARA,
PHL_GET_NEXT_CONFIG,
PHL_GET_HEAD_CONFIG
};
enum phl_twt_action {
PHL_TWT_ACTION_NONE = 0,
PHL_TWT_ACTION_FREE,
PHL_TWT_ACTION_ALLOC,
PHL_TWT_ACTION_ENABLE,
PHL_TWT_ACTION_DISABLE,
PHL_TWT_ACTION_UP_ERROR
};
enum phl_wait_annc_type {
PHL_WAIT_ANNC_DISABLE = 0,
PHL_WAIT_ANNC_ENABLE = 1
};
enum twt_config_state {
twt_config_state_free = 0,
twt_config_state_idle,
twt_config_state_enable,
twt_config_state_error,
twt_config_state_unknown
};
struct phl_twt_config{
u8 idx;
enum twt_config_state state;
struct rtw_wifi_role_t *role;
struct rtw_phl_twt_info twt_info;
struct phl_queue twt_sta_queue; /*struct rtw_twt_sta_info*/
};
struct phl_twt_cfg_info {
u8 twt_cfg_num;
void *twt_cfg_ring; /*struct phl_twt_config*/
};
struct phl_twt_info {
struct phl_twt_cfg_info twt_cfg_info;
struct phl_queue twt_annc_queue;/*struct _twt_announce_info*/
};
struct _twt_announce_info{
_os_list list;
u8 map_offset;
u32 wait_macid_map;
};
struct _twt_compare {
struct rtw_phl_twt_setup_info twt_setup;
struct rtw_wifi_role_t *role;
};
struct _twt_teardown {
struct rtw_phl_stainfo_t *phl_sta;
enum rtw_phl_nego_type nego_type;
u8 id;
};
/* TWT element */
/*Control*/
#define SET_TWT_CONTROL_NDP_PAGING_INDICATOR(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 0, 1, _val)
#define SET_TWT_CONTROL_RESPONDER_PM_MODE(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 1, 1, _val)
#define SET_TWT_CONTROL_NEGOTIATION_TYPE(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 2, 2, _val)
#define SET_TWT_CONTROL_TWT_INFORMATION_FRAME_DISABLE(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 4, 1, _val)
#define SET_TWT_CONTROL_WAKE_DURATION_UNIT(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 5, 1, _val)
/*Individual TWT Parameter Set field*/
/*Request Type*/
#define SET_TWT_REQ_TYPE_TWT_REQUEST(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 0, 1, _val)
#define SET_TWT_REQ_TYPE_TWT_SETUP_COMMAND(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 1, 3, _val)
#define SET_TWT_REQ_TYPE_TRIGGER(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 4, 1, _val)
#define SET_TWT_REQ_TYPE_IMPLICIT(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 5, 1, _val)
#define SET_TWT_REQ_TYPE_FLOW_TYPE(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 6, 1, _val)
#define SET_TWT_REQ_TYPE_TWT_FLOW_IDENTIFER(_ele_start, _val) \
SET_BITS_TO_LE_2BYTE(_ele_start, 7, 3, _val)
#define SET_TWT_REQ_TYPE_TWT_WAKE_INTERVAL_EXPONENT(_ele_start, _val) \
SET_BITS_TO_LE_2BYTE(_ele_start, 10, 5, _val)
#define SET_TWT_REQ_TYPE_TWT_PROTECTION(_ele_start, _val) \
SET_BITS_TO_LE_2BYTE(_ele_start, 15, 1, _val)
#define SET_TWT_TARGET_WAKE_TIME_L(_ele_start, _val) \
SET_BITS_TO_LE_4BYTE((_ele_start) + 2, 0, 32, _val)
#define SET_TWT_TARGET_WAKE_TIME_H(_ele_start, _val) \
SET_BITS_TO_LE_4BYTE((_ele_start) + 6, 0, 32, _val)
/*twt group assignment*/
#define SET_TWT_NOMINAL_MINIMUM_TWT_WAKE_DURATION(_ele_start, _offset, _val) \
SET_BITS_TO_LE_1BYTE((_ele_start) + _offset, 0, 8, _val)
#define SET_TWT_TWT_WAKE_INTERVAL_MANTISSA(_ele_start, _offset, _val) \
SET_BITS_TO_LE_2BYTE((_ele_start) + _offset, 0, 16, _val)
#define SET_TWT_TWT_CHANNEL(_ele_start, _offset, _val) \
SET_BITS_TO_LE_1BYTE((_ele_start) + _offset, 0, 8, _val)
/*Broadcast TWT Parameter Set field*/
/*TWT FLOW field*/
#define SET_TWT_FLOW_ID(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 0, 3, _val)
#define SET_NEGOTIATION_TYPE(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 5, 2, _val)
#define SET_TEARDOWN_ALL_TWT(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 7, 1, _val)
#define SET_BROADCAST_TWT_ID(_ele_start, _val) \
SET_BITS_TO_LE_1BYTE(_ele_start, 0, 5, _val)
/*Control*/
/*Bit0*/
#define GET_TWT_CONTROL_NDP_PAGING_INDICATOR(_buf) \
LE_BITS_TO_1BYTE(_buf, 0, 1)
/*Bit1*/
#define GET_TWT_CONTROL_RESPONDER_PM_MODE(_buf) \
LE_BITS_TO_1BYTE(_buf, 1, 1)
/*Bit2-3*/
#define GET_TWT_CONTROL_NEGOTIATION_TYPE(_buf) \
LE_BITS_TO_1BYTE(_buf, 2, 2)
/*Bit4*/
#define GET_TWT_CONTROL_TWT_INFORMATION_FRAME_DISABLE(_buf) \
LE_BITS_TO_1BYTE(_buf, 4, 1)
/*Bit5*/
#define GET_TWT_CONTROL_WAKE_DURATION_UNIT(_buf) \
LE_BITS_TO_1BYTE(_buf, 5, 1)
/*Request Type*/
/*Bit0*/
#define GET_TWT_REQ_TYPE_TWT_REQUEST(_buf) \
LE_BITS_TO_2BYTE(_buf, 0, 1)
/*Bit1-3*/
#define GET_TWT_REQ_TYPE_TWT_SETUP_COMMAND(_buf) \
LE_BITS_TO_2BYTE(_buf, 1, 3)
/*Bit4*/
#define GET_TWT_REQ_TYPE_TRIGGER(_buf) \
LE_BITS_TO_2BYTE(_buf, 4, 1)
/*Bit5*/
#define GET_TWT_REQ_TYPE_IMPLICIT(_buf) \
LE_BITS_TO_2BYTE(_buf, 5, 1)
/*Bit6*/
#define GET_TWT_REQ_TYPE_FLOW_TYPE(_buf) \
LE_BITS_TO_2BYTE(_buf, 6, 1)
/*Bit7-9*/
#define GET_TWT_REQ_TYPE_TWT_FLOW_IDENTIFER(_buf) \
LE_BITS_TO_2BYTE(_buf, 7, 3)
/*Bit10-14*/
#define GET_TWT_REQ_TYPE_TWT_WAKE_INTERVAL_EXPONENT(_buf) \
LE_BITS_TO_2BYTE(_buf, 10, 5)
/*Bit15*/
#define GET_TWT_REQ_TYPE_TWT_PROTECTION(_buf) \
LE_BITS_TO_2BYTE(_buf, 15, 1)
#define GET_TWT_TARGET_WAKE_TIME_L(_buf) \
LE_BITS_TO_4BYTE(_buf, 0, 32)
#define GET_TWT_TARGET_WAKE_TIME_H(_buf) \
LE_BITS_TO_4BYTE((_buf) + 4, 0, 32)
#define GET_TWT_NOMINAL_MINIMUM_TWT_WAKE_DURATION(_buf) \
LE_BITS_TO_1BYTE(_buf, 0, 8)
#define GET_TWT_TWT_WAKE_INTERVAL_MANTISSA(_buf) \
LE_BITS_TO_2BYTE(_buf, 0, 16)
#define GET_TWT_TWT_CHANNEL(_buf) \
LE_BITS_TO_1BYTE(_buf, 0, 8)
/*TWT FLOW field*/
/*Bit0-2*/
#define GET_TWT_FLOW_ID(_buf) \
LE_BITS_TO_1BYTE(_buf, 0, 3)
/*Bit5-6*/
#define GET_NEGOTIATION_TYPE(_buf) \
	LE_BITS_TO_1BYTE(_buf, 5, 2)
/*Bit7*/
#define GET_TEARDOWN_ALL_TWT(_buf) \
LE_BITS_TO_1BYTE(_buf, 7, 1)
/*Bit0-4*/
#define GET_BROADCAST_TWT_ID(_buf) \
LE_BITS_TO_1BYTE(_buf, 0, 5)
#define GET_ELE_ID(_buf) \
	LE_BITS_TO_1BYTE(_buf, 0, 8)
#define GET_ELE_LEN(_buf) \
	LE_BITS_TO_1BYTE(_buf, 0, 8)
#define GET_DIALOG_TOKEN(_buf) \
	LE_BITS_TO_1BYTE(_buf, 0, 8)
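/*
 * Illustrative sketch (not part of the original header): one possible way to
 * use the Request Type SET_x/GET_x macros above to fill and read back the
 * 2-byte Request Type field of an individual TWT Parameter Set. The field
 * values below are hypothetical and only show the macro call pattern.
 */
#if 0
static void example_fill_twt_req_type(u8 *req_type)
{
	SET_TWT_REQ_TYPE_TWT_REQUEST(req_type, 1); /* acting as TWT requester */
	SET_TWT_REQ_TYPE_TWT_SETUP_COMMAND(req_type, 0); /* setup command: request */
	SET_TWT_REQ_TYPE_TRIGGER(req_type, 1); /* trigger-enabled TWT */
	SET_TWT_REQ_TYPE_IMPLICIT(req_type, 1); /* implicit TWT */
	SET_TWT_REQ_TYPE_FLOW_TYPE(req_type, 0); /* announced TWT */
	SET_TWT_REQ_TYPE_TWT_FLOW_IDENTIFER(req_type, 2); /* flow id 2 */
	SET_TWT_REQ_TYPE_TWT_WAKE_INTERVAL_EXPONENT(req_type, 10);
	SET_TWT_REQ_TYPE_TWT_PROTECTION(req_type, 0);
	/* read one field back, e.g. the wake interval exponent (10) */
	(void)GET_TWT_REQ_TYPE_TWT_WAKE_INTERVAL_EXPONENT(req_type);
}
#endif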
enum rtw_phl_status phl_twt_init(void *phl);
void phl_twt_deinit(void *phl);
enum rtw_phl_status
rtw_phl_twt_disable_all_twt_by_role(void *phl, struct rtw_wifi_role_t *role);
enum rtw_phl_status rtw_phl_twt_alloc_twt_config(void *phl, struct rtw_wifi_role_t *role,
struct rtw_phl_twt_setup_info setup_info, u8 benable, u8 *id);
enum rtw_phl_status rtw_phl_twt_free_twt_config(void *phl, u8 id);
enum rtw_phl_status rtw_phl_twt_add_sta_info(void *phl, struct rtw_phl_stainfo_t *phl_sta,
u8 config_id, u8 id);
#else
#define phl_twt_init(_phl) RTW_PHL_STATUS_SUCCESS
#define phl_twt_deinit(_phl)
#endif /* CONFIG_PHL_TWT */
#endif /*_PHL_TWT_H_*/
| 2301_81045437/rtl8852be | phl/phl_twt.h | C | agpl-3.0 | 8,156 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_TX_C_
#include "phl_headers.h"
/**
 * Used by the read / write pointer mechanism; returns the number of ring
 * entries currently available to be read.
 * @rptr: input, the read pointer
 * @wptr: input, the write pointer
 * @bndy: input, the boundary (ring size) of the read / write pointer mechanism
 */
u16 phl_calc_avail_rptr(u16 rptr, u16 wptr, u16 bndy)
{
u16 avail_rptr = 0;
if (wptr >= rptr)
avail_rptr = wptr - rptr;
else if (rptr > wptr)
avail_rptr = wptr + (bndy - rptr);
return avail_rptr;
}
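/*
 * Worked example (illustrative numbers, not from the original source):
 * with bndy = 256, rptr = 250 and wptr = 3 the write pointer has wrapped,
 * so the readable entries are wptr + (bndy - rptr) = 3 + (256 - 250) = 9.
 * With rptr = 10 and wptr = 15 there is no wrap and the result is simply
 * wptr - rptr = 5.
 */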
/**
 * Used by the read / write pointer mechanism; returns the number of ring
 * entries currently available to be written.
 * @rptr: input, the read pointer
 * @wptr: input, the write pointer
 * @bndy: input, the boundary (ring size) of the read / write pointer mechanism
 */
u16 phl_calc_avail_wptr(u16 rptr, u16 wptr, u16 bndy)
{
u16 avail_wptr = 0;
if (rptr > wptr)
avail_wptr = rptr - wptr - 1;
else if (wptr >= rptr)
avail_wptr = rptr + (bndy - wptr) - 1;
return avail_wptr;
}
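/*
 * Worked example (illustrative numbers, not from the original source):
 * with bndy = 256, rptr = 10 and wptr = 3 the writable entries are
 * rptr - wptr - 1 = 6; with rptr = 3 and wptr = 10 they are
 * rptr + (bndy - wptr) - 1 = 3 + 246 - 1 = 248. The "- 1" keeps one slot
 * unused so a completely full ring can be distinguished from an empty one.
 */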
void phl_dump_sorted_ring(_os_list *sorted_ring)
{
struct phl_ring_status *ring_sts;
u16 i = 0;
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump sorted ring==\n");
phl_list_for_loop(ring_sts, struct phl_ring_status, sorted_ring,
list) {
i++;
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==ring %d==\n", i);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->macid = %d\n",
ring_sts->macid);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->req_busy = %d\n",
ring_sts->req_busy);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->ring_ptr->tid = %d\n",
ring_sts->ring_ptr->tid);
}
}
void phl_dump_tx_plan(_os_list *sta_list)
{
struct phl_tx_plan *tx_plan;
u16 i = 0;
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump tx plan==\n");
phl_list_for_loop(tx_plan, struct phl_tx_plan, sta_list,
list) {
i++;
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==tx plan %d==\n", i);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->sleep = %d\n",
tx_plan->sleep);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->has_mgnt = %d\n",
tx_plan->has_mgnt);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->has_hiq = %d\n",
tx_plan->has_hiq);
phl_dump_sorted_ring(&tx_plan->sorted_ring);
}
}
void phl_dump_t_fctrl_result(_os_list *t_fctrl_result)
{
struct phl_ring_status *ring_sts;
u16 i = 0;
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump tx flow control result==\n");
phl_list_for_loop(ring_sts, struct phl_ring_status, t_fctrl_result,
list) {
i++;
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==ring %d==\n", i);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->macid = %d\n",
ring_sts->macid);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->req_busy = %d\n",
ring_sts->req_busy);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->ring_ptr->tid = %d\n",
ring_sts->ring_ptr->tid);
}
}
void phl_dump_tx_stats(struct rtw_stats *stats)
{
PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
"Dump Tx statistics\n"
"tx_byte_uni = %lld\n"
"tx_byte_total = %lld\n"
"tx_tp_kbits = %d\n"
"last_tx_time_ms = %d\n",
stats->tx_byte_uni,
stats->tx_byte_total,
stats->tx_tp_kbits,
stats->last_tx_time_ms);
}
void phl_dump_h2c_pool_stats(struct phl_h2c_pkt_pool *h2c_pkt_pool)
{
PHL_INFO("[h2c_stats] idle cmd %d, idle data %d, idle ldata %d, busy h2c %d.\n",
h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt,
h2c_pkt_pool->idle_h2c_pkt_data_list.cnt,
h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt,
h2c_pkt_pool->busy_h2c_pkt_list.cnt);
}
void phl_reset_tx_stats(struct rtw_stats *stats)
{
stats->tx_byte_uni = 0;
stats->tx_byte_total = 0;
stats->tx_tp_kbits = 0;
stats->last_tx_time_ms = 0;
	stats->txtp.last_calc_time_ms = 0;
	stats->txtp.last_calc_bits = 0;
stats->tx_traffic.lvl = RTW_TFC_IDLE;
stats->tx_traffic.sts = 0;
}
const char *phl_tfc_lvl_to_str(u8 lvl)
{
switch (lvl) {
case RTW_TFC_IDLE:
return "IDLE";
case RTW_TFC_ULTRA_LOW:
return "ULTRA_LOW";
case RTW_TFC_LOW:
return "LOW";
case RTW_TFC_MID:
return "MID";
case RTW_TFC_HIGH:
return "HIGH";
default:
return "-";
}
}
void
phl_tx_traffic_upd(struct rtw_stats *sts)
{
u32 tp_k = 0, tp_m = 0;
enum rtw_tfc_lvl tx_tfc_lvl = RTW_TFC_IDLE;
tp_k = sts->tx_tp_kbits;
tp_m = sts->tx_tp_kbits >> 10;
if (tp_m >= TX_HIGH_TP_THRES_MBPS)
tx_tfc_lvl = RTW_TFC_HIGH;
else if (tp_m >= TX_MID_TP_THRES_MBPS)
tx_tfc_lvl = RTW_TFC_MID;
else if (tp_m >= TX_LOW_TP_THRES_MBPS)
tx_tfc_lvl = RTW_TFC_LOW;
else if (tp_k >= TX_ULTRA_LOW_TP_THRES_KBPS)
tx_tfc_lvl = RTW_TFC_ULTRA_LOW;
else
tx_tfc_lvl = RTW_TFC_IDLE;
if (sts->tx_traffic.lvl > tx_tfc_lvl) {
sts->tx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_DECREASE);
sts->tx_traffic.lvl = tx_tfc_lvl;
} else if (sts->tx_traffic.lvl < tx_tfc_lvl) {
sts->tx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_INCREASE);
sts->tx_traffic.lvl = tx_tfc_lvl;
} else if (sts->tx_traffic.sts &
(TRAFFIC_CHANGED | TRAFFIC_INCREASE | TRAFFIC_DECREASE)) {
sts->tx_traffic.sts &= ~(TRAFFIC_CHANGED | TRAFFIC_INCREASE |
TRAFFIC_DECREASE);
}
}
void phl_update_tx_stats(struct rtw_stats *stats, struct rtw_xmit_req *tx_req)
{
u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
u64 diff_bits = 0;
stats->last_tx_time_ms = cur_time;
stats->tx_byte_total += tx_req->total_len;
stats->txreq_num++;
if (tx_req->mdata.bc == 0 && tx_req->mdata.mc == 0)
stats->tx_byte_uni += tx_req->total_len;
if (0 == stats->txtp.last_calc_time_ms ||
0 == stats->txtp.last_calc_bits) {
stats->txtp.last_calc_time_ms = stats->last_tx_time_ms;
stats->txtp.last_calc_bits = stats->tx_byte_uni * 8;
} else {
if (cur_time >= stats->txtp.last_calc_time_ms) {
diff_t = cur_time - stats->txtp.last_calc_time_ms;
} else {
diff_t = RTW_U32_MAX - stats->txtp.last_calc_time_ms +
cur_time + 1;
}
if (diff_t > TXTP_CALC_DIFF_MS && stats->tx_byte_uni != 0) {
diff_bits = (stats->tx_byte_uni * 8) -
stats->txtp.last_calc_bits;
stats->tx_tp_kbits = (u32)_os_division64(diff_bits,
diff_t);
stats->txtp.last_calc_bits = stats->tx_byte_uni * 8;
stats->txtp.last_calc_time_ms = cur_time;
}
}
}
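/*
 * Worked example of the throughput update above (illustrative numbers):
 * diff_bits is in bits and diff_t in milliseconds, so diff_bits / diff_t
 * directly yields kbits per second. If 2,000,000 unicast bits were sent
 * during a 500 ms window, tx_tp_kbits becomes 2000000 / 500 = 4000 kbps,
 * i.e. roughly 4 Mbps.
 */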
void phl_tx_statistics(struct phl_info_t *phl_info, struct rtw_xmit_req *tx_req)
{
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct rtw_stats *phl_stats = &phl_com->phl_stats;
struct rtw_stats *sta_stats = NULL;
struct rtw_phl_stainfo_t *sta = NULL;
u16 macid = tx_req->mdata.macid;
if (!phl_macid_is_valid(phl_info, macid))
goto dev_stat;
sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
if (NULL == sta)
goto dev_stat;
sta_stats = &sta->stats;
phl_update_tx_stats(sta_stats, tx_req);
dev_stat:
phl_update_tx_stats(phl_stats, tx_req);
}
static void _phl_free_phl_tring_list(void *phl,
struct rtw_phl_tring_list *ring_list)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
struct rtw_phl_tx_ring *ring;
struct rtw_xmit_req *tx_req;
u16 rptr = 0;
u8 i = 0;
for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
ring = &ring_list->phl_ring[i];
rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
while (rptr != ring->core_idx) {
rptr += 1;
if (rptr >= MAX_PHL_RING_ENTRY_NUM)
rptr = 0;
tx_req = (struct rtw_xmit_req *)ring->entry[rptr];
if (NULL == tx_req)
break;
ops->tx_recycle(drv_priv, tx_req);
}
}
_os_kmem_free(drv_priv, ring_list, sizeof(*ring_list));
}
void _phl_init_tx_plan(struct phl_tx_plan *tx_plan)
{
INIT_LIST_HEAD(&tx_plan->list);
tx_plan->sleep = false;
tx_plan->has_mgnt = false;
tx_plan->has_hiq = false;
INIT_LIST_HEAD(&tx_plan->sorted_ring);
}
static struct rtw_phl_tring_list *
_phl_allocate_phl_tring_list(void *phl, u16 macid,
u8 hw_band, u8 hw_wmm, u8 hw_port)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_tring_list *phl_tring_list = NULL;
void *drv_priv = NULL;
u32 buf_len = 0;
u8 i = 0, dma_ch = 0;
drv_priv = phl_to_drvpriv(phl_info);
buf_len = sizeof(struct rtw_phl_tring_list);
phl_tring_list = (struct rtw_phl_tring_list *)_os_kmem_alloc(drv_priv,
buf_len);
if (NULL != phl_tring_list) {
_os_mem_set(drv_priv, phl_tring_list, 0, buf_len);
INIT_LIST_HEAD(&phl_tring_list->list);
phl_tring_list->macid = macid;
phl_tring_list->band = hw_band;
phl_tring_list->wmm = hw_wmm;
phl_tring_list->port = hw_port;
/*phl_tring_list->mbssid = hw_mbssid*/
for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
phl_tring_list->phl_ring[i].tid = i;
dma_ch = rtw_hal_tx_chnl_mapping(phl_info->hal, macid,
i, hw_band);
phl_tring_list->phl_ring[i].dma_ch = dma_ch;
}
_phl_init_tx_plan(&phl_tring_list->tx_plan);
}
return phl_tring_list;
}
enum rtw_phl_status
phl_register_tx_ring(void *phl, u16 macid, u8 hw_band, u8 hw_wmm, u8 hw_port)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct rtw_phl_tring_list *phl_tring_list = NULL;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
_os_list *ring_list = NULL;
phl_tring_list = _phl_allocate_phl_tring_list(phl, macid, hw_band, hw_wmm, hw_port);
if (NULL != phl_tring_list) {
ring_list = &phl_info->t_ring_list;
_os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
list_add_tail(&phl_tring_list->list, ring_list);
_os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
phl_status = RTW_PHL_STATUS_SUCCESS;
}
return phl_status;
}
enum rtw_phl_status phl_deregister_tx_ring(void *phl, u16 macid)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = phl_to_drvpriv(phl_info);
struct rtw_phl_tring_list *phl_tring_list = NULL, *t;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
_os_list *ring_list = NULL;
ring_list = &phl_info->t_ring_list;
_os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
phl_list_for_loop_safe(phl_tring_list, t, struct rtw_phl_tring_list,
ring_list, list) {
if (macid == phl_tring_list->macid) {
list_del(&phl_tring_list->list);
phl_status = RTW_PHL_STATUS_SUCCESS;
break;
}
}
_os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
if (RTW_PHL_STATUS_SUCCESS == phl_status) {
/* defer the free operation to avoid racing with _phl_tx_callback_xxx */
_os_spinlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
list_add_tail(&phl_tring_list->list, &phl_info->t_ring_free_list);
_os_spinunlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
}
return phl_status;
}
void phl_free_deferred_tx_ring(struct phl_info_t *phl_info)
{
void *drv_priv = phl_to_drvpriv(phl_info);
struct rtw_phl_tring_list *phl_tring_list = NULL, *t;
_os_list *ring_list = NULL;
ring_list = &phl_info->t_ring_free_list;
_os_spinlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
if (list_empty(ring_list) == false) {
phl_list_for_loop_safe(phl_tring_list, t, struct rtw_phl_tring_list,
ring_list, list) {
list_del(&phl_tring_list->list);
_phl_free_phl_tring_list(phl_info, phl_tring_list);
}
}
_os_spinunlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
}
struct phl_ring_status *phl_alloc_ring_sts(struct phl_info_t *phl_info)
{
struct phl_ring_sts_pool *ring_sts_pool = phl_info->ring_sts_pool;
struct phl_ring_status *ring_sts = NULL;
_os_spinlock(phl_to_drvpriv(phl_info), &ring_sts_pool->idle_lock, _bh, NULL);
if (false == list_empty(&ring_sts_pool->idle)) {
ring_sts = list_first_entry(&ring_sts_pool->idle,
struct phl_ring_status, list);
list_del(&ring_sts->list);
}
_os_spinunlock(phl_to_drvpriv(phl_info), &ring_sts_pool->idle_lock, _bh, NULL);
return ring_sts;
}
void phl_release_ring_sts(struct phl_info_t *phl_info,
struct phl_ring_status *ring_sts)
{
struct phl_ring_sts_pool *ring_sts_pool = phl_info->ring_sts_pool;
void *drv_priv = NULL;
drv_priv = phl_to_drvpriv(phl_info);
_os_spinlock(drv_priv, &ring_sts_pool->idle_lock, _bh, NULL);
_os_mem_set(drv_priv, ring_sts, 0, sizeof(*ring_sts));
INIT_LIST_HEAD(&ring_sts->list);
list_add_tail(&ring_sts->list, &ring_sts_pool->idle);
_os_spinunlock(drv_priv, &ring_sts_pool->idle_lock, _bh, NULL);
}
void _phl_ring_status_deinit(struct phl_info_t *phl_info)
{
struct phl_ring_sts_pool *ring_sts_pool = NULL;
u16 buf_len = 0;
void *drv_priv = NULL;
FUNCIN();
drv_priv = phl_to_drvpriv(phl_info);
ring_sts_pool = (struct phl_ring_sts_pool *)phl_info->ring_sts_pool;
if (NULL != ring_sts_pool) {
buf_len = sizeof(struct phl_ring_sts_pool);
_os_spinlock_free(drv_priv, &ring_sts_pool->idle_lock);
_os_spinlock_free(drv_priv, &ring_sts_pool->busy_lock);
_os_mem_free(drv_priv, ring_sts_pool, buf_len);
}
FUNCOUT();
}
enum rtw_phl_status _phl_ring_status_init(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_ring_sts_pool *ring_sts_pool = NULL;
struct phl_ring_status *ring_sts = NULL;
void *drv_priv = NULL;
u16 buf_len = 0;
u8 i = 0;
FUNCIN_WSTS(pstatus);
drv_priv = phl_to_drvpriv(phl_info);
buf_len = sizeof(struct phl_ring_sts_pool);
ring_sts_pool =
(struct phl_ring_sts_pool *)_os_mem_alloc(drv_priv, buf_len);
if (NULL != ring_sts_pool) {
_os_mem_set(drv_priv, ring_sts_pool, 0, buf_len);
INIT_LIST_HEAD(&ring_sts_pool->idle);
INIT_LIST_HEAD(&ring_sts_pool->busy);
_os_spinlock_init(drv_priv, &ring_sts_pool->idle_lock);
_os_spinlock_init(drv_priv, &ring_sts_pool->busy_lock);
for (i = 0; i < MAX_PHL_RING_STATUS_NUMBER; i++) {
ring_sts = &ring_sts_pool->ring_sts[i];
INIT_LIST_HEAD(&ring_sts->list);
_os_spinlock(drv_priv,
(void *)&ring_sts_pool->idle_lock, _bh, NULL);
list_add_tail(&ring_sts->list, &ring_sts_pool->idle);
_os_spinunlock(drv_priv,
(void *)&ring_sts_pool->idle_lock, _bh, NULL);
}
phl_info->ring_sts_pool = ring_sts_pool;
pstatus = RTW_PHL_STATUS_SUCCESS;
}
if (RTW_PHL_STATUS_SUCCESS != pstatus)
_phl_ring_status_deinit(phl_info);
FUNCOUT_WSTS(pstatus);
return pstatus;
}
struct phl_ring_status *
_phl_check_ring_status(struct phl_info_t *phl_info,
struct rtw_phl_tx_ring *ring,
struct rtw_phl_tring_list *tring_list)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_ring_status *ring_sts = NULL;
u16 avail = 0, rptr = 0;
void *drv_priv = phl_to_drvpriv(phl_info);
do {
rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
avail = phl_calc_avail_rptr(rptr, ring->core_idx,
MAX_PHL_RING_ENTRY_NUM);
if (0 == avail) {
ring_sts = NULL;
pstatus = RTW_PHL_STATUS_SUCCESS;
break;
} else {
ring_sts = phl_alloc_ring_sts(phl_info);
if (NULL == ring_sts) {
PHL_ERR("query ring status fail!\n");
pstatus = RTW_PHL_STATUS_RESOURCE;
break;
}
ring_sts->macid = tring_list->macid;
ring_sts->band = tring_list->band;
ring_sts->wmm = tring_list->wmm;
ring_sts->port = tring_list->port;
/*ring_sts->mbssid = tring_list->mbssid;*/
ring_sts->req_busy = avail;
ring_sts->ring_ptr = ring;
rptr += 1;
if (rptr >= MAX_PHL_RING_ENTRY_NUM)
_os_atomic_set(drv_priv, &ring->phl_next_idx, 0);
else
_os_atomic_set(drv_priv, &ring->phl_next_idx, rptr);
pstatus = RTW_PHL_STATUS_SUCCESS;
break;
}
} while (false);
return ring_sts;
}
void _phl_reset_tx_plan(struct phl_info_t *phl_info,
struct phl_tx_plan *tx_plan)
{
struct phl_ring_status *ring_sts, *t;
INIT_LIST_HEAD(&tx_plan->list);
tx_plan->sleep = false;
tx_plan->has_mgnt = false;
tx_plan->has_hiq = false;
phl_list_for_loop_safe(ring_sts, t, struct phl_ring_status,
&tx_plan->sorted_ring, list) {
list_del(&ring_sts->list);
phl_release_ring_sts(phl_info, ring_sts);
}
INIT_LIST_HEAD(&tx_plan->sorted_ring);
}
void _phl_sort_ring_by_tid(struct phl_ring_status *ring_sts,
struct phl_tx_plan *tx_plan,
enum rtw_phl_ring_cat cat)
{
struct phl_ring_status *last_sts = NULL;
if (ring_sts->ring_ptr->tid == 1) {
list_add_tail(&ring_sts->list,
&tx_plan->sorted_ring);
} else if (ring_sts->ring_ptr->tid == 2) {
if (list_empty(&tx_plan->sorted_ring)) {
list_add_tail(&ring_sts->list,
&tx_plan->sorted_ring);
} else {
last_sts = list_last_entry(&tx_plan->sorted_ring,
struct phl_ring_status, list);
if (1 == last_sts->ring_ptr->tid) {
__list_add(&ring_sts->list,
_get_prev(&last_sts->list),
&last_sts->list);
} else {
list_add_tail(&ring_sts->list,
&tx_plan->sorted_ring);
}
}
} else {
list_add(&ring_sts->list,
&tx_plan->sorted_ring);
if (RTW_PHL_RING_CAT_MGNT == cat)
tx_plan->has_mgnt = true;
else if (RTW_PHL_RING_CAT_HIQ == cat)
tx_plan->has_hiq = true;
}
}
void _phl_check_tring_list(struct phl_info_t *phl_info,
struct rtw_phl_tring_list *tring_list,
_os_list *sta_list)
{
struct phl_ring_status *ring_sts = NULL;
struct rtw_phl_tx_ring *ring = NULL;
struct phl_tx_plan *tx_plan = &tring_list->tx_plan;
u8 i = 0;
for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
ring = &tring_list->phl_ring[i];
ring_sts = _phl_check_ring_status(phl_info, ring, tring_list);
if (NULL != ring_sts) {
_phl_sort_ring_by_tid(ring_sts, tx_plan, i);
} else {
continue;
}
}
/* hana_todo: check this macid is sleep or not */
if (!list_empty(&tx_plan->sorted_ring)) {
list_add_tail(&tx_plan->list, sta_list);
}
}
u8 phl_check_xmit_ring_resource(struct phl_info_t *phl_info, _os_list *sta_list)
{
void *drvpriv = phl_to_drvpriv(phl_info);
_os_list *tring_list_head = &phl_info->t_ring_list;
struct rtw_phl_tring_list *tring_list, *t;
_os_spinlock(drvpriv, &phl_info->t_ring_list_lock, _bh, NULL);
phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
tring_list_head, list) {
_phl_check_tring_list(phl_info, tring_list, sta_list);
}
#ifdef SDIO_TX_THREAD
	/*
	 * When SDIO_TX_THREAD is enabled, "phl_sw_tx_more" is cleared in
	 * phl_tx_sdio_thrd_hdl() instead of here.
	 */
#else
_os_atomic_set(drvpriv, &phl_info->phl_sw_tx_more, 0);
#endif
_os_spinunlock(drvpriv, &phl_info->t_ring_list_lock, _bh, NULL);
if (true == list_empty(sta_list))
return false;
else
return true;
}
void phl_tx_flow_ctrl(struct phl_info_t *phl_info, _os_list *sta_list)
{
_os_list *t_fctrl_result = &phl_info->t_fctrl_result;
_os_list *tid_entry[MAX_PHL_RING_CAT_NUM] = {0};
struct phl_tx_plan *tx_plan, *tp;
struct phl_ring_status *ring_sts = NULL, *ts;
u8 tid = 0;
_os_mem_set(phl_to_drvpriv(phl_info), tid_entry, 0,
sizeof(_os_list *) * MAX_PHL_RING_CAT_NUM);
phl_list_for_loop_safe(tx_plan, tp, struct phl_tx_plan, sta_list,
list) {
/* drop power saving station */
if (true == tx_plan->sleep) {
list_del(&tx_plan->list);
_phl_reset_tx_plan(phl_info, tx_plan);
continue;
}
if (true == tx_plan->has_hiq) {
ring_sts = list_first_entry(&tx_plan->sorted_ring,
struct phl_ring_status, list);
list_del(&ring_sts->list);
list_add(&ring_sts->list, t_fctrl_result);
}
if (true == tx_plan->has_mgnt) {
ring_sts = list_first_entry(&tx_plan->sorted_ring,
struct phl_ring_status, list);
list_del(&ring_sts->list);
list_add(&ring_sts->list, t_fctrl_result);
}
/* todo: drop station which has reached tx limit */
phl_list_for_loop_safe(ring_sts, ts, struct phl_ring_status,
&tx_plan->sorted_ring, list) {
list_del(&ring_sts->list);
tid = ring_sts->ring_ptr->tid;
/* todo: drop tid which has reached tx limit */
/* sw tx cnt limit */
if (NULL == tid_entry[tid]) {
list_add_tail(&ring_sts->list, t_fctrl_result);
} else {
__list_add(&ring_sts->list, tid_entry[tid],
_get_next(tid_entry[tid]));
}
tid_entry[tid] = &ring_sts->list;
}
/* clear tx plan */
list_del(&tx_plan->list);
_phl_reset_tx_plan(phl_info, tx_plan);
}
}
enum rtw_phl_status phl_register_handler(struct rtw_phl_com_t *phl_com,
struct rtw_phl_handler *handler)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
_os_tasklet *tasklet = NULL;
_os_workitem *workitem = NULL;
void *drv_priv = phlcom_to_drvpriv(phl_com);
FUNCIN_WSTS(phl_status);
if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
tasklet = &handler->os_handler.u.tasklet;
phl_status = _os_tasklet_init(drv_priv, tasklet,
handler->callback, handler);
} else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
workitem = &handler->os_handler.u.workitem;
phl_status = _os_workitem_init(drv_priv, workitem,
handler->callback, workitem);
} else {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handle type(%d)\n",
handler->type);
}
if (RTW_PHL_STATUS_SUCCESS != phl_status)
phl_deregister_handler(phl_com, handler);
FUNCOUT_WSTS(phl_status);
return phl_status;
}
enum rtw_phl_status phl_deregister_handler(
struct rtw_phl_com_t *phl_com, struct rtw_phl_handler *handler)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
_os_tasklet *tasklet = NULL;
_os_workitem *workitem = NULL;
void *drv_priv = phlcom_to_drvpriv(phl_com);
FUNCIN_WSTS(phl_status);
if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
tasklet = &handler->os_handler.u.tasklet;
phl_status = _os_tasklet_deinit(drv_priv, tasklet);
} else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
workitem = &handler->os_handler.u.workitem;
phl_status = _os_workitem_deinit(drv_priv, workitem);
} else {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handle type(%d)\n",
handler->type);
}
if (RTW_PHL_STATUS_SUCCESS != phl_status) {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
"[WARNING] deregister handler fail (status = 0x%08X)\n",
phl_status);
}
FUNCOUT_WSTS(phl_status);
return phl_status;
}
enum rtw_phl_status phl_schedule_handler(
struct rtw_phl_com_t *phl_com, struct rtw_phl_handler *handler)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
_os_tasklet *tasklet = NULL;
_os_workitem *workitem = NULL;
void *drv_priv = phlcom_to_drvpriv(phl_com);
FUNCIN_WSTS(phl_status);
if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
tasklet = &handler->os_handler.u.tasklet;
phl_status = _os_tasklet_schedule(drv_priv, tasklet);
} else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
workitem = &handler->os_handler.u.workitem;
phl_status = _os_workitem_schedule(drv_priv, workitem);
} else {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handle type(%d)\n",
handler->type);
}
FUNCOUT_WSTS(phl_status);
return phl_status;
}
static enum rtw_phl_status enqueue_h2c_pkt(
struct phl_info_t *phl_info,
struct phl_queue *pool_list,
struct rtw_h2c_pkt *h2c_pkt, u8 pos)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
void *drv = phl_to_drvpriv(phl_info);
_os_spinlockfg sp_flags;
if (h2c_pkt != NULL) {
_os_spinlock(drv, &pool_list->lock, _irq, &sp_flags);
if (_tail == pos)
list_add_tail(&h2c_pkt->list, &pool_list->queue);
else if (_first == pos)
list_add(&h2c_pkt->list, &pool_list->queue);
pool_list->cnt++;
_os_spinunlock(drv, &pool_list->lock, _irq, &sp_flags);
pstatus = RTW_PHL_STATUS_SUCCESS;
}
return pstatus;
}
static struct rtw_h2c_pkt *dequeue_h2c_pkt(struct phl_info_t *phl_info,
struct phl_queue *pool_list)
{
struct rtw_h2c_pkt *h2c_pkt = NULL;
void *drv = phl_to_drvpriv(phl_info);
_os_spinlockfg sp_flags;
_os_spinlock(drv, &pool_list->lock, _irq, &sp_flags);
if (list_empty(&pool_list->queue)) {
h2c_pkt = NULL;
} else {
h2c_pkt = list_first_entry(&pool_list->queue, struct rtw_h2c_pkt, list);
list_del(&h2c_pkt->list);
pool_list->cnt--;
}
_os_spinunlock(drv, &pool_list->lock, _irq, &sp_flags);
return h2c_pkt;
}
static void _phl_reset_h2c_pkt(struct phl_info_t *phl_info,
struct rtw_h2c_pkt *h2c_pkt,
u32 buf_len)
{
enum rtw_h2c_pkt_type type = h2c_pkt->type;
_os_mem_set(phl_to_drvpriv(phl_info), h2c_pkt->vir_head, 0, buf_len);
h2c_pkt->buf_len = buf_len;
h2c_pkt->id = 0;
h2c_pkt->host_idx = 0;
h2c_pkt->data_len = 0;
h2c_pkt->h2c_seq = 0;
switch (type) {
case H2CB_TYPE_CMD:
h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
h2c_pkt->vir_tail = h2c_pkt->vir_data;
h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_CMD_LEN;
break;
case H2CB_TYPE_DATA:
h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
h2c_pkt->vir_tail = h2c_pkt->vir_data;
h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_DATA_LEN;
break;
case H2CB_TYPE_LONG_DATA:
h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
h2c_pkt->vir_tail = h2c_pkt->vir_data;
h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_LONG_DATA_LEN;
break;
case H2CB_TYPE_MAX:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "_phl_reset_h2c_pkt(): Unsupported case:%d, please check it\n",
type);
break;
default:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "_phl_reset_h2c_pkt(): Unrecognize case:%d, please check it\n",
type);
break;
}
}
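/*
 * Descriptive note (added for clarity, derived from the code above): after a
 * reset the h2c packet buffer is laid out as
 *
 *   vir_head ... [FWCMD_HDR_LEN + _WD_BODY_LEN headroom] ... vir_data ... vir_end
 *
 * with vir_tail starting at vir_data and advancing as payload is written.
 * The payload capacity (vir_end - vir_data) is H2C_CMD_LEN, H2C_DATA_LEN or
 * H2C_LONG_DATA_LEN depending on h2c_pkt->type.
 */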
enum rtw_phl_status phl_enqueue_busy_h2c_pkt(struct phl_info_t *phl_info,
struct rtw_h2c_pkt *h2c_pkt, u8 pos)
{
struct phl_h2c_pkt_pool *h2c_pkt_pool =
(struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
struct phl_queue *queue = &h2c_pkt_pool->busy_h2c_pkt_list;
return enqueue_h2c_pkt(phl_info, queue, h2c_pkt, pos);
}
enum rtw_phl_status phl_enqueue_idle_h2c_pkt(
struct phl_info_t *phl_info,
struct rtw_h2c_pkt *h2c_pkt)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_h2c_pkt_pool *h2c_pkt_pool =
(struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
struct phl_queue *queue = NULL;
int *idle_cnt = NULL;
u32 buf_len = 0;
if (!h2c_pkt)
return pstatus;
switch (h2c_pkt->type) {
case H2CB_TYPE_CMD:
buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_CMD_LEN;
queue = &h2c_pkt_pool->idle_h2c_pkt_cmd_list;
idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt;
break;
case H2CB_TYPE_DATA:
buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_DATA_LEN;
queue = &h2c_pkt_pool->idle_h2c_pkt_data_list;
idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_data_list.cnt;
break;
case H2CB_TYPE_LONG_DATA:
buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_LONG_DATA_LEN;
queue = &h2c_pkt_pool->idle_h2c_pkt_ldata_list;
idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt;
break;
case H2CB_TYPE_MAX:
PHL_ERR("%s : cannot find the matching case(%d).\n",
__func__, h2c_pkt->type);
break;
default:
PHL_ERR("%s : cannot find the matching cases(%d).\n",
__func__, h2c_pkt->type);
break;
}
_phl_reset_h2c_pkt(phl_info, h2c_pkt, buf_len);
pstatus = enqueue_h2c_pkt(phl_info, queue, h2c_pkt, _tail);
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s : remaining %d (type %d).\n",
__func__, *idle_cnt, h2c_pkt->type);
return pstatus;
}
struct rtw_h2c_pkt *phl_query_busy_h2c_pkt(struct phl_info_t *phl_info)
{
struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
struct rtw_h2c_pkt *h2c_pkt = NULL;
struct phl_queue *queue = NULL;
h2c_pkt_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
queue = &h2c_pkt_pool->busy_h2c_pkt_list;
h2c_pkt = dequeue_h2c_pkt(phl_info, queue);
return h2c_pkt;
}
struct rtw_h2c_pkt *phl_query_idle_h2c_pkt(struct phl_info_t *phl_info, u8 type)
{
struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
struct rtw_h2c_pkt *h2c_pkt = NULL;
enum rtw_h2c_pkt_type h2c_type = (enum rtw_h2c_pkt_type)type;
struct phl_queue *queue = NULL;
int *idle_cnt = NULL;
h2c_pkt_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
switch (h2c_type) {
case H2CB_TYPE_CMD:
queue = &h2c_pkt_pool->idle_h2c_pkt_cmd_list;
idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt;
break;
case H2CB_TYPE_DATA:
queue = &h2c_pkt_pool->idle_h2c_pkt_data_list;
idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_data_list.cnt;
break;
case H2CB_TYPE_LONG_DATA:
queue = &h2c_pkt_pool->idle_h2c_pkt_ldata_list;
idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt;
break;
case H2CB_TYPE_MAX:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "phl_query_idle_h2c_pkt(): Unsupported case:%d, please check it\n",
h2c_type);
break;
default:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "phl_query_idle_h2c_pkt(): Unrecognize case:%d, please check it\n",
h2c_type);
break;
}
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_,
"phl_query_idle_h2c_pkt => remaining %d (type %d).\n",
*idle_cnt, h2c_type);
h2c_pkt = dequeue_h2c_pkt(phl_info, queue);
return h2c_pkt;
}
#if 0
static enum rtw_phl_status phl_release_target_h2c_pkt(
struct phl_info_t *phl_info,
struct phl_h2c_pkt_pool *h2c_pkt_pool,
struct rtw_h2c_pkt *h2c_pkt)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
if (h2c_pkt_pool != NULL && h2c_pkt != NULL) {
phl_enqueue_idle_h2c_pkt(phl_info, h2c_pkt);
pstatus = RTW_PHL_STATUS_SUCCESS;
}
return pstatus;
}
#endif
static void _phl_free_h2c_pkt(struct phl_info_t *phl_info,
struct rtw_h2c_pkt *h2c_pkt_buf)
{
u16 i = 0;
struct rtw_h2c_pkt *h2c_pkt = h2c_pkt_buf;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
if (NULL != h2c_pkt) {
for (i = 0; i < MAX_H2C_PKT_NUM; i++) {
if (NULL == h2c_pkt->vir_head)
continue;
hci_trx_ops->free_h2c_pkt_buf(phl_info, h2c_pkt);
h2c_pkt->vir_head = NULL;
h2c_pkt->cache = false;
h2c_pkt++;
}
_os_mem_free(phl_to_drvpriv(phl_info), h2c_pkt_buf,
sizeof(struct rtw_h2c_pkt) * MAX_H2C_PKT_NUM);
h2c_pkt_buf = NULL;
}
}
struct rtw_h2c_pkt *_phl_alloc_h2c_pkt(struct phl_info_t *phl_info,
struct phl_h2c_pkt_pool *h2c_pool)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
struct rtw_h2c_pkt *h2c_pkt = NULL;
struct rtw_h2c_pkt *h2c_pkt_root = NULL;
struct phl_h2c_pkt_pool *h2c_pkt_pool = h2c_pool;
u32 buf_len = 0;
int i;
buf_len = sizeof(struct rtw_h2c_pkt) * MAX_H2C_PKT_NUM;
h2c_pkt_root = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
h2c_pkt = h2c_pkt_root;
if (h2c_pkt != NULL) {
for (i = 0; i < MAX_H2C_PKT_NUM; i++) {
h2c_pkt->cache = false;
buf_len = get_h2c_size_by_range(i);
hci_trx_ops->alloc_h2c_pkt_buf(phl_info, h2c_pkt, buf_len);
if (NULL == h2c_pkt->vir_head) {
pstatus = RTW_PHL_STATUS_RESOURCE;
break;
}
h2c_pkt->buf_len = buf_len;
h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
h2c_pkt->vir_tail = h2c_pkt->vir_data;
INIT_LIST_HEAD(&h2c_pkt->list);
if (i < _H2CB_CMD_QLEN) {
h2c_pkt->type = H2CB_TYPE_CMD;
h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_CMD_LEN;
enqueue_h2c_pkt(phl_info,
&h2c_pkt_pool->idle_h2c_pkt_cmd_list, h2c_pkt, _tail);
} else if (i < _H2CB_CMD_QLEN + _H2CB_DATA_QLEN) {
h2c_pkt->type = H2CB_TYPE_DATA;
h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_DATA_LEN;
enqueue_h2c_pkt(phl_info,
&h2c_pkt_pool->idle_h2c_pkt_data_list, h2c_pkt, _tail);
} else {
h2c_pkt->type = H2CB_TYPE_LONG_DATA;
h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_LONG_DATA_LEN;
enqueue_h2c_pkt(phl_info,
&h2c_pkt_pool->idle_h2c_pkt_ldata_list, h2c_pkt, _tail);
}
h2c_pkt++;
pstatus = RTW_PHL_STATUS_SUCCESS;
}
}
if (RTW_PHL_STATUS_SUCCESS != pstatus) {
_phl_free_h2c_pkt(phl_info, h2c_pkt_root);
h2c_pkt_root = NULL;
}
return h2c_pkt_root;
}
static void _phl_free_h2c_pool(struct phl_info_t *phl_info)
{
struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
void *drv_priv = phl_to_drvpriv(phl_info);
FUNCIN();
h2c_pkt_pool = phl_info->h2c_pool;
if (NULL != h2c_pkt_pool) {
h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt = 0;
h2c_pkt_pool->idle_h2c_pkt_data_list.cnt = 0;
h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt = 0;
_phl_free_h2c_pkt(phl_info, h2c_pkt_pool->h2c_pkt_buf);
h2c_pkt_pool->h2c_pkt_buf = NULL;
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->busy_h2c_pkt_list.lock);
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->recycle_lock);
_os_mem_free(phl_to_drvpriv(phl_info), h2c_pkt_pool,
sizeof(struct phl_h2c_pkt_pool));
}
FUNCOUT();
}
enum rtw_phl_status
_phl_alloc_h2c_pool(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
struct rtw_h2c_pkt *h2c_pkt_buf = NULL;
void *drv_priv = NULL;
FUNCIN_WSTS(pstatus);
drv_priv = phl_to_drvpriv(phl_info);
h2c_pkt_pool = _os_mem_alloc(drv_priv, sizeof(struct phl_h2c_pkt_pool));
if (NULL != h2c_pkt_pool) {
INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_cmd_list.queue);
h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt = 0;
INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_data_list.queue);
h2c_pkt_pool->idle_h2c_pkt_data_list.cnt = 0;
INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_ldata_list.queue);
h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt = 0;
INIT_LIST_HEAD(&h2c_pkt_pool->busy_h2c_pkt_list.queue);
h2c_pkt_pool->busy_h2c_pkt_list.cnt = 0;
_os_spinlock_init(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
_os_spinlock_init(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
_os_spinlock_init(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
_os_spinlock_init(drv_priv,
&h2c_pkt_pool->busy_h2c_pkt_list.lock);
_os_spinlock_init(drv_priv,
&h2c_pkt_pool->recycle_lock);
h2c_pkt_buf = _phl_alloc_h2c_pkt(phl_info, h2c_pkt_pool);
if (NULL == h2c_pkt_buf) {
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->busy_h2c_pkt_list.lock);
_os_spinlock_free(drv_priv,
&h2c_pkt_pool->recycle_lock);
_os_mem_free(drv_priv, h2c_pkt_pool, sizeof(struct phl_h2c_pkt_pool));
h2c_pkt_pool = NULL;
pstatus = RTW_PHL_STATUS_RESOURCE;
} else {
h2c_pkt_pool->h2c_pkt_buf = h2c_pkt_buf;
pstatus = RTW_PHL_STATUS_SUCCESS;
}
}
if (RTW_PHL_STATUS_SUCCESS == pstatus)
phl_info->h2c_pool = h2c_pkt_pool;
FUNCOUT_WSTS(pstatus);
return pstatus;
}
void
phl_trx_free_handler(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_phl_handler *tx_handler = &phl_info->phl_tx_handler;
struct rtw_phl_handler *rx_handler = &phl_info->phl_rx_handler;
struct rtw_phl_handler *event_handler = &phl_info->phl_event_handler;
FUNCIN();
phl_deregister_handler(phl_info->phl_com, event_handler);
phl_deregister_handler(phl_info->phl_com, rx_handler);
phl_deregister_handler(phl_info->phl_com, tx_handler);
FUNCOUT();
}
void
phl_trx_free_sw_rsc(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
void *drv_priv = NULL;
FUNCIN();
drv_priv = phl_to_drvpriv(phl_info);
_phl_free_h2c_pool(phl_info);
hci_trx_ops->hci_trx_deinit(phl_info);
phl_rx_deinit(phl_info);
_phl_ring_status_deinit(phl_info);
_os_spinlock_free(drv_priv, &phl_info->t_ring_list_lock);
_os_spinlock_free(drv_priv, &phl_info->rx_ring_lock);
_os_spinlock_free(drv_priv, &phl_info->t_fctrl_result_lock);
_os_spinlock_free(drv_priv, &phl_info->t_ring_free_list_lock);
FUNCOUT();
}
enum rtw_phl_status phl_datapath_start(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
do {
pstatus = hci_trx_ops->trx_cfg(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
break;
rtw_hal_notification(phl_info->hal, MSG_EVT_DATA_PATH_START, HW_BAND_MAX);
	} while (false);
return pstatus;
}
void phl_datapath_stop(struct phl_info_t *phl_info)
{
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
hci_trx_ops->trx_stop(phl_info);
rtw_hal_notification(phl_info->hal, MSG_EVT_DATA_PATH_STOP, HW_BAND_MAX);
phl_free_deferred_tx_ring(phl_info);
}
void phl_datapath_deinit(struct phl_info_t *phl_info)
{
phl_trx_free_handler(phl_info);
phl_trx_free_sw_rsc(phl_info);
}
enum rtw_phl_status phl_datapath_init(struct phl_info_t *phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
struct rtw_phl_handler *event_handler = &phl_info->phl_event_handler;
void *drv_priv = NULL;
FUNCIN_WSTS(pstatus);
drv_priv = phl_to_drvpriv(phl_info);
do {
#ifdef CONFIG_PHL_CPU_BALANCE_RX
_os_workitem *workitem = &event_handler->os_handler.u.workitem;
#endif
INIT_LIST_HEAD(&phl_info->t_ring_list);
INIT_LIST_HEAD(&phl_info->t_fctrl_result);
INIT_LIST_HEAD(&phl_info->t_ring_free_list);
_os_spinlock_init(drv_priv, &phl_info->t_ring_list_lock);
_os_spinlock_init(drv_priv, &phl_info->rx_ring_lock);
_os_spinlock_init(drv_priv, &phl_info->t_fctrl_result_lock);
_os_spinlock_init(drv_priv, &phl_info->t_ring_free_list_lock);
#ifdef CONFIG_PHL_CPU_BALANCE_RX
event_handler->type = RTW_PHL_HANDLER_PRIO_LOW;
_os_workitem_config_cpu(drv_priv, workitem, "RX_PHL_0", CPU_ID_RX_CORE_0);
#else
event_handler->type = RTW_PHL_HANDLER_PRIO_HIGH;
#endif
event_handler->callback = phl_event_indicator;
event_handler->context = phl_info;
event_handler->drv_priv = drv_priv;
event_handler->status = 0;
pstatus = phl_register_handler(phl_info->phl_com, event_handler);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
break;
pstatus = _phl_ring_status_init(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
break;
pstatus = phl_rx_init(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
break;
pstatus = hci_trx_ops->hci_trx_init(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
break;
/* allocate h2c pkt */
pstatus = _phl_alloc_h2c_pool(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
break;
	} while (false);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
phl_datapath_deinit(phl_info);
FUNCOUT_WSTS(pstatus);
return pstatus;
}
static enum rtw_phl_status
_phl_tx_pwr_notify(void *phl)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
#ifdef SDIO_TX_THREAD
phl_tx_sdio_wake_thrd((struct phl_info_t *)phl);
#else
pstatus = rtw_phl_tx_req_notify(phl);
#endif
return pstatus;
}
#ifdef CONFIG_POWER_SAVE
static void _phl_req_pwr_cb(void *priv, struct phl_msg *msg)
{
struct phl_info_t *phl_info = (struct phl_info_t *)priv;
if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_TX)
_os_atomic_set(phl_to_drvpriv(phl_info),
&phl_info->phl_sw_tx_req_pwr,
0);
else
_os_atomic_set(phl_to_drvpriv(phl_info),
&phl_info->phl_sw_rx_req_pwr,
0);
if (IS_MSG_FAIL(msg->msg_id) || IS_MSG_CANCEL(msg->msg_id)) {
PHL_WARN("%s(): request power failure.\n", __func__);
return;
}
if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_TX)
_phl_tx_pwr_notify(priv);
else if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_RX)
rtw_phl_start_rx_process(priv);
}
static void _phl_datapath_req_pwr(struct phl_info_t *phl_info, u8 type)
{
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
"%s(): [DATA_CTRL] SW datapath paused by ps module and request power\n",
__func__);
SET_MSG_MDL_ID_FIELD(msg.msg_id, ((type == PHL_CTRL_TX) ? PHL_MDL_TX : PHL_MDL_RX));
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_TRX_PWR_REQ);
attr.completion.completion = _phl_req_pwr_cb;
attr.completion.priv = phl_info;
/* shall set req_pwr flag first before sending req_pwr msg */
if (PHL_CTRL_TX == type)
_os_atomic_set(phl_to_drvpriv(phl_info),
&phl_info->phl_sw_tx_req_pwr,
1);
else
_os_atomic_set(phl_to_drvpriv(phl_info),
&phl_info->phl_sw_rx_req_pwr,
1);
psts = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
if (RTW_PHL_STATUS_SUCCESS != psts) {
PHL_WARN("%s(): CANNOT send msg to request power.\n", __func__);
if (PHL_CTRL_TX == type)
_os_atomic_set(phl_to_drvpriv(phl_info),
&phl_info->phl_sw_tx_req_pwr,
0);
else
_os_atomic_set(phl_to_drvpriv(phl_info),
&phl_info->phl_sw_rx_req_pwr,
0);
}
}
static bool _phl_datapath_chk_pwr(struct phl_info_t *phl_info, u8 type)
{
void *drvpriv = phl_to_drvpriv(phl_info);
enum data_ctrl_mdl pause_id = 0;
_os_atomic *trx_more;
_os_atomic *req_pwr;
if (type == PHL_CTRL_TX) {
pause_id = phl_info->pause_tx_id;
trx_more = &phl_info->phl_sw_tx_more;
req_pwr = &phl_info->phl_sw_tx_req_pwr;
} else {
pause_id = phl_info->pause_rx_id;
trx_more = &phl_info->phl_sw_rx_more;
req_pwr = &phl_info->phl_sw_rx_req_pwr;
}
if (pause_id & ~(DATA_CTRL_MDL_PS)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
"%s(): [DATA_CTRL] SW datapath paused by module(0x%x)\n",
__func__,
pause_id);
return false;
}
if (true == _os_atomic_read(drvpriv, trx_more) &&
false == _os_atomic_read(drvpriv, req_pwr))
_phl_datapath_req_pwr(phl_info, type);
return true;
}
#endif
bool phl_datapath_chk_trx_pause(struct phl_info_t *phl_info, u8 type)
{
void *drvpriv = phl_to_drvpriv(phl_info);
_os_atomic *sw_sts;
if (type == PHL_CTRL_TX)
sw_sts = &phl_info->phl_sw_tx_sts;
else
sw_sts = &phl_info->phl_sw_rx_sts;
if (PHL_TX_STATUS_SW_PAUSE == _os_atomic_read(drvpriv, sw_sts)) {
#ifdef CONFIG_POWER_SAVE
_phl_datapath_chk_pwr(phl_info, type);
#endif
return true;
}
return false;
}
void rtw_phl_tx_stop(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
/* Pause SW Tx */
hci_trx_ops->req_tx_stop(phl_info);
}
void rtw_phl_tx_resume(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
/* Resume SW Tx */
hci_trx_ops->trx_resume(phl_info, PHL_CTRL_TX);
}
enum rtw_phl_status rtw_phl_tx_req_notify(void *phl)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
pstatus = phl_schedule_handler(phl_info->phl_com,
&phl_info->phl_tx_handler);
return pstatus;
}
enum rtw_phl_status rtw_phl_add_tx_req(void *phl,
struct rtw_xmit_req *tx_req)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_tring_list *tring_list, *t;
struct rtw_phl_tx_ring *ring = NULL;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = NULL;
_os_list *list_head = &phl_info->t_ring_list;
u16 macid = tx_req->mdata.macid;
u8 tid = tx_req->mdata.tid;
u16 ring_res = 0, rptr = 0;
drv_priv = phl_to_drvpriv(phl_info);
_os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
list_head, list) {
if (macid != tring_list->macid) {
continue;
} else {
/* hana_todo check mgnt frame case */
ring = &tring_list->phl_ring[tid];
break;
}
}
if (NULL != ring) {
rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
ring_res = phl_calc_avail_wptr(rptr, ring->core_idx,
MAX_PHL_RING_ENTRY_NUM);
if (ring_res > 0) {
ring->core_idx =
(ring->core_idx + 1) % MAX_PHL_RING_ENTRY_NUM;
ring->entry[ring->core_idx] = (u8 *)tx_req;
phl_tx_statistics(phl_info, tx_req);
#ifdef CONFIG_PHL_TX_DBG
if (tx_req->tx_dbg.en_dbg) {
tx_req->tx_dbg.core_add_tx_t =
_os_get_cur_time_us();
}
#endif /* CONFIG_PHL_TX_DBG */
_os_atomic_set(drv_priv, &phl_info->phl_sw_tx_more, 1);
pstatus = RTW_PHL_STATUS_SUCCESS;
} else {
PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "no ring resource to add new tx request!\n");
pstatus = RTW_PHL_STATUS_RESOURCE;
}
}
_os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
return pstatus;
}
u16 rtw_phl_tring_rsc(void *phl, u16 macid, u8 tid)
{
struct rtw_phl_tring_list *tring_list, *t;
struct rtw_phl_tx_ring *ring = NULL;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
void *drv_priv = NULL;
_os_list *list_head = &phl_info->t_ring_list;
u16 ring_res = 0, rptr = 0;
drv_priv = phl_to_drvpriv(phl_info);
phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
list_head, list) {
if (macid != tring_list->macid) {
continue;
} else {
/* hana_todo check mgnt frame case */
ring = &tring_list->phl_ring[tid];
break;
}
}
if (NULL != ring) {
rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
ring_res = phl_calc_avail_rptr(rptr, ring->core_idx,
MAX_PHL_RING_ENTRY_NUM);
}
return ring_res;
}
enum rtw_phl_status phl_indic_pkt_complete(void *phl)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct rtw_evt_info_t *evt_info = &phl_info->phl_com->evt_info;
void *drv_priv = phl_to_drvpriv(phl_info);
do {
_os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
evt_info->evt_bitmap |= RTW_PHL_EVT_TX_RECYCLE;
_os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
pstatus = phl_schedule_handler(phl_info->phl_com,
&phl_info->phl_event_handler);
} while (false);
return pstatus;
}
enum rtw_phl_status rtw_phl_recycle_tx_buf(void *phl, u8 *tx_buf_ptr)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
#ifdef CONFIG_USB_HCI
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
pstatus = hci_trx_ops->recycle_tx_buf(phl, tx_buf_ptr);
#endif
return pstatus;
}
static enum rtw_phl_status
_phl_cfg_tx_ampdu(void *phl, struct rtw_phl_stainfo_t *sta)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
enum rtw_hal_status hsts = RTW_HAL_STATUS_FAILURE;
hsts = rtw_hal_cfg_tx_ampdu(phl_info->hal, sta);
if (RTW_HAL_STATUS_SUCCESS != hsts)
goto fail;
return RTW_PHL_STATUS_SUCCESS;
fail:
return RTW_PHL_STATUS_FAILURE;
}
#ifdef CONFIG_CMD_DISP
enum rtw_phl_status
phl_cmd_cfg_ampdu_hdl(struct phl_info_t *phl_info, u8 *param)
{
struct rtw_phl_stainfo_t *sta = (struct rtw_phl_stainfo_t *)param;
PHL_INFO(" %s(), sta = %p !\n", __func__, sta);
return _phl_cfg_tx_ampdu(phl_info, sta);
}
#endif
enum rtw_phl_status
rtw_phl_cmd_cfg_ampdu(void *phl,
struct rtw_wifi_role_t *wrole,
struct rtw_phl_stainfo_t *sta,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
#ifdef CONFIG_CMD_DISP
sts = phl_cmd_enqueue(phl,
wrole->hw_band,
MSG_EVT_CFG_AMPDU,
(u8 *)sta, 0,
NULL,
cmd_type, cmd_timeout);
if (is_cmd_failure(sts)) {
/* Send cmd success, but wait cmd fail*/
sts = RTW_PHL_STATUS_FAILURE;
} else if (sts != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
sts = RTW_PHL_STATUS_FAILURE;
}
return sts;
#else
PHL_ERR("%s : CONFIG_CMD_DISP need to be enabled for MSG_EVT_CFG_AMPDU !! \n", __func__);
return sts;
#endif
}
void
phl_tx_watchdog(struct phl_info_t *phl_info)
{
struct phl_hci_trx_ops *trx_ops = phl_info->hci_trx_ops;
struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
phl_tx_traffic_upd(phl_stats);
trx_ops->tx_watchdog(phl_info);
}
enum data_ctrl_mdl _phl_get_ctrl_mdl(enum phl_module_id id)
{
enum data_ctrl_mdl ctrl_mdl = DATA_CTRL_MDL_NONE;
switch (id) {
case PHL_MDL_PHY_MGNT:
ctrl_mdl = DATA_CTRL_MDL_CMD_CTRLER;
break;
case PHL_MDL_SER:
ctrl_mdl = DATA_CTRL_MDL_SER;
break;
case PHL_MDL_POWER_MGNT:
ctrl_mdl = DATA_CTRL_MDL_PS;
break;
default:
PHL_WARN("Unknown PHL module(%d) try to control datapath and is skipped!\n",
id);
ctrl_mdl = DATA_CTRL_MDL_NONE;
break;
}
return ctrl_mdl;
}
enum rtw_phl_status
_phl_poll_hw_tx_done(void)
{
PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Polling hw tx done is not supported now\n");
return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status
_phl_hw_tx_resume(void)
{
PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Resume hw tx not is supported now\n");
return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status
_phl_sw_tx_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
if (!TEST_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl)) {
PHL_WARN("[DATA_CTRL] module %d resume sw tx fail, sw tx is paused by module 0x%x\n",
ctl->id, phl_info->pause_tx_id);
return sts;
}
CLEAR_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
if (DATA_CTRL_MDL_NONE != phl_info->pause_tx_id) {
PHL_WARN("[DATA_CTRL] sw tx is still paused by tx pause id = 0x%x\n",
phl_info->pause_tx_id);
sts = RTW_PHL_STATUS_SUCCESS;
} else {
ops->trx_resume(phl_info, PHL_CTRL_TX);
sts = rtw_phl_tx_req_notify(phl_info);
}
return sts;
}
void
_phl_sw_tx_rst(struct phl_info_t *phl_info)
{
struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
ops->trx_reset(phl_info, PHL_CTRL_TX);
}
enum rtw_phl_status
_phl_sw_tx_pause(struct phl_info_t *phl_info,
struct phl_data_ctl_t *ctl,
bool rst_sw)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
void *drv = phl_to_drvpriv(phl_info);
u32 i = 0;
enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
if (PHL_TX_STATUS_SW_PAUSE ==
_os_atomic_read(drv, &phl_info->phl_sw_tx_sts)) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
"[DATA_CTRL] SW tx has been paused by module(0x%x)\n",
phl_info->pause_tx_id);
SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
"[DATA_CTRL] Update pause sw tx id(0x%x) by module(%d)\n",
phl_info->pause_tx_id, ctl->id);
sts = RTW_PHL_STATUS_SUCCESS;
return sts;
}
if (PHL_TX_STATUS_STOP_INPROGRESS ==
_os_atomic_read(drv, &phl_info->phl_sw_tx_sts)) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
"[DATA_CTRL] SW tx has been requested to pause by module(0x%x)\n",
phl_info->pause_tx_id);
SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
"[DATA_CTRL] Update pause sw tx id(0x%x) by module(%d)\n",
phl_info->pause_tx_id, ctl->id);
sts = RTW_PHL_STATUS_SUCCESS;
return sts;
}
	/* request sw tx to stop */
ops->req_tx_stop(phl_info);
	/*
	 * notify sw tx one last time
	 * and poll whether it received the stop request and paused itself
	 */
if (RTW_PHL_STATUS_SUCCESS == rtw_phl_tx_req_notify(phl_info)) {
for (i = 0; i < POLL_SW_TX_PAUSE_CNT; i++) {
if (true == ops->is_tx_pause(phl_info)) {
SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
sts = RTW_PHL_STATUS_SUCCESS;
break;
}
_os_sleep_ms(drv, POLL_SW_TX_PAUSE_MS);
}
if (RTW_PHL_STATUS_SUCCESS != sts) {
SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
sts = RTW_PHL_STATUS_CMD_TIMEOUT;
PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
"[DATA_CTRL] Module(%d) polling sw tx pause timeout (%d ms)!\n",
ctl->id,
(POLL_SW_TX_PAUSE_MS * POLL_SW_TX_PAUSE_CNT));
} else {
if (true == rst_sw) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_,
"[DATA_CTRL] Pause Tx with reset is not supported now! requested by module(%d)\n",
ctl->id);
}
}
} else {
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Schedule sw tx process fail!\n");
}
return sts;
}
enum rtw_phl_status
_phl_poll_hw_rx_done(void)
{
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "[DATA_CTRL] Polling hw rx done is not supported now\n");
return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status
_phl_hw_rx_resume(void)
{
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "[DATA_CTRL] Resume hw rx not is supported now\n");
return RTW_PHL_STATUS_FAILURE;
}
enum rtw_phl_status
_phl_sw_rx_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
if (!TEST_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl)) {
PHL_WARN("[DATA_CTRL] module %d resume sw rx fail, sw rx is paused by module 0x%x\n",
ctl->id, phl_info->pause_rx_id);
return sts;
}
CLEAR_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
if (DATA_CTRL_MDL_NONE != phl_info->pause_rx_id) {
PHL_WARN("[DATA_CTRL] sw rx is still paused by rx pause id = 0x%x\n",
phl_info->pause_rx_id);
sts = RTW_PHL_STATUS_SUCCESS;
} else {
ops->trx_resume(phl_info, PHL_CTRL_RX);
sts = rtw_phl_start_rx_process(phl_info);
}
return sts;
}
void
_phl_sw_rx_rst(struct phl_info_t *phl_info)
{
struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
ops->trx_reset(phl_info, PHL_CTRL_RX);
}
enum rtw_phl_status
_phl_sw_rx_pause(struct phl_info_t *phl_info,
struct phl_data_ctl_t *ctl,
bool rst_sw)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
void *drv = phl_to_drvpriv(phl_info);
u32 i = 0;
enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
if (PHL_RX_STATUS_SW_PAUSE ==
_os_atomic_read(drv, &phl_info->phl_sw_rx_sts)) {
PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
"[DATA_CTRL] SW rx has been paused by module(0x%x)\n",
phl_info->pause_rx_id);
SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
"[DATA_CTRL] Update pause sw rx id(0x%x) by module(%d)\n",
phl_info->pause_rx_id, ctl->id);
sts = RTW_PHL_STATUS_SUCCESS;
return sts;
}
if (PHL_RX_STATUS_STOP_INPROGRESS ==
_os_atomic_read(drv, &phl_info->phl_sw_rx_sts)) {
PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
"[DATA_CTRL] SW rx has been requested to pause by module(0x%x)\n",
phl_info->pause_rx_id);
SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
"[DATA_CTRL] Update pause sw rx id(0x%x) by module(%d)\n",
phl_info->pause_rx_id, ctl->id);
sts = RTW_PHL_STATUS_SUCCESS;
return sts;
}
	/* request sw rx to stop */
ops->req_rx_stop(phl_info);
	/*
	 * notify sw rx one last time
	 * and poll whether it received the stop request and paused itself
	 */
if (RTW_PHL_STATUS_SUCCESS == rtw_phl_start_rx_process(phl_info)) {
for (i = 0; i < POLL_SW_RX_PAUSE_CNT; i++) {
if (true == ops->is_rx_pause(phl_info)) {
SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
sts = RTW_PHL_STATUS_SUCCESS;
break;
}
_os_sleep_ms(drv, POLL_SW_RX_PAUSE_MS);
}
if (RTW_PHL_STATUS_SUCCESS != sts) {
SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
sts = RTW_PHL_STATUS_CMD_TIMEOUT;
PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
"[DATA_CTRL] Module(%d) polling sw rx pause timeout (%d ms)!\n",
ctl->id,
(POLL_SW_RX_PAUSE_MS * POLL_SW_RX_PAUSE_CNT));
} else {
if (true == rst_sw) {
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
"[DATA_CTRL] Pause Rx with reset is not supported now! requested by module(%d)\n",
ctl->id);
}
}
} else {
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Schedule sw rx process fail!\n");
}
return sts;
}
enum rtw_phl_status
_phl_hw_trx_rst_resume(struct phl_info_t *phl_info)
{
void *drv = phl_to_drvpriv(phl_info);
if (false == _os_atomic_read(drv, &phl_info->is_hw_trx_pause)) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] HW T/Rx is not paused\n");
}
if (rtw_hal_lv1_rcvy(phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2) !=
RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Reset and Resume HW T/Rx fail\n");
return RTW_PHL_STATUS_FAILURE;
} else {
_os_atomic_set(drv, &phl_info->is_hw_trx_pause, false);
return RTW_PHL_STATUS_SUCCESS;
}
}
enum rtw_phl_status
_phl_hw_trx_pause(struct phl_info_t *phl_info)
{
void *drv = phl_to_drvpriv(phl_info);
if (true == _os_atomic_read(drv, &phl_info->is_hw_trx_pause)) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] HW T/Rx is already paused\n");
}
if (rtw_hal_lv1_rcvy(phl_info->hal, RTW_PHL_SER_LV1_RCVY_STEP_1) !=
RTW_HAL_STATUS_SUCCESS) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Pause HW T/Rx fail\n");
return RTW_PHL_STATUS_FAILURE;
} else {
_os_atomic_set(drv, &phl_info->is_hw_trx_pause, true);
return RTW_PHL_STATUS_SUCCESS;
}
}
enum rtw_phl_status
_phl_trx_sw_pause(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
do {
sts = _phl_sw_tx_pause(phl_info, ctl, false);
if (RTW_PHL_STATUS_SUCCESS != sts) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause SW Tx fail in PHL_DATA_CTL_TRX_SW_PAUSE!\n");
break;
}
sts = _phl_sw_rx_pause(phl_info, ctl, false);
if (RTW_PHL_STATUS_SUCCESS != sts) {
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Pause SW Rx fail in PHL_DATA_CTL_TRX_SW_PAUSE!\n");
break;
}
} while (false);
return sts;
}
enum rtw_phl_status
_phl_trx_sw_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
do {
sts = _phl_sw_tx_resume(phl_info, ctl);
if (RTW_PHL_STATUS_SUCCESS != sts) {
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume SW Tx fail in PHL_DATA_CTL_TRX_SW_RESUME!\n");
break;
}
sts = _phl_sw_rx_resume(phl_info, ctl);
if (RTW_PHL_STATUS_SUCCESS != sts) {
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Resume SW Rx fail in PHL_DATA_CTL_TRX_SW_RESUME!\n");
break;
}
} while (false);
return sts;
}
enum rtw_phl_status
_phl_trx_pause_w_rst(struct phl_info_t *phl_info,
struct phl_data_ctl_t *ctl,
struct phl_msg *msg)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
enum data_ctrl_err_code *err_sts = NULL;
if (msg->outbuf && msg->outlen == sizeof(*err_sts))
err_sts = (enum data_ctrl_err_code *)msg->outbuf;
do {
sts = _phl_sw_tx_pause(phl_info, ctl, false);
if (RTW_PHL_STATUS_SUCCESS != sts) {
if (err_sts) {
if (RTW_PHL_STATUS_CMD_TIMEOUT == sts)
*err_sts = CTRL_ERR_SW_TX_PAUSE_POLLTO;
else
*err_sts = CTRL_ERR_SW_TX_PAUSE_FAIL;
}
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause SW Tx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
break;
}
sts = _phl_hw_trx_pause(phl_info);
if (RTW_PHL_STATUS_SUCCESS != sts) {
if (err_sts)
*err_sts = CTRL_ERR_HW_TRX_PAUSE_FAIL;
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause HW T/Rx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
break;
}
sts = _phl_sw_rx_pause(phl_info, ctl, false);
if (RTW_PHL_STATUS_SUCCESS != sts) {
if (err_sts) {
if (RTW_PHL_STATUS_CMD_TIMEOUT == sts)
*err_sts = CTRL_ERR_SW_RX_PAUSE_POLLTO;
else
*err_sts = CTRL_ERR_SW_RX_PAUSE_FAIL;
}
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Pause SW Rx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
break;
}
_phl_sw_tx_rst(phl_info);
_phl_sw_rx_rst(phl_info);
} while (false);
return sts;
}
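/*
* Resume in the reverse order of _phl_trx_pause_w_rst: SW Rx first, then
* reset/resume HW T/Rx, then SW Tx. Errors are reported through msg->outbuf
* in the same way as the pause-with-reset path.
*/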
enum rtw_phl_status
_phl_trx_resume_w_rst(struct phl_info_t *phl_info,
struct phl_data_ctl_t *ctl,
struct phl_msg *msg)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
enum data_ctrl_err_code *err_sts = NULL;
if (msg->outbuf && msg->outlen == sizeof(*err_sts))
err_sts = (enum data_ctrl_err_code *)msg->outbuf;
do {
sts = _phl_sw_rx_resume(phl_info, ctl);
if (RTW_PHL_STATUS_SUCCESS != sts) {
if (err_sts)
*err_sts = CTRL_ERR_SW_RX_RESUME_FAIL;
PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Resume SW Rx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
break;
}
sts = _phl_hw_trx_rst_resume(phl_info);
if (RTW_PHL_STATUS_SUCCESS != sts) {
if (err_sts)
*err_sts = CTRL_ERR_HW_TRX_RESUME_FAIL;
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume HW T/Rx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
break;
}
sts = _phl_sw_tx_resume(phl_info, ctl);
if (RTW_PHL_STATUS_SUCCESS != sts) {
if (err_sts)
*err_sts = CTRL_ERR_SW_TX_RESUME_FAIL;
PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume SW Tx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
break;
}
} while (false);
return sts;
}
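/*
* Dispatcher for PHL_DATA_CTL_* commands issued by other modules; routes each
* command to the corresponding SW/HW pause, resume or reset handler above.
*/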
enum rtw_phl_status
phl_data_ctrler(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl,
struct phl_msg *msg)
{
enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
if (NULL == ctl) {
PHL_WARN("phl_tx_ctrler(): input ctl is NULL\n");
return RTW_PHL_STATUS_FAILURE;
}
switch (ctl->cmd) {
case PHL_DATA_CTL_HW_TRX_RST_RESUME:
sts = _phl_hw_trx_rst_resume(phl_info);
break;
case PHL_DATA_CTL_HW_TRX_PAUSE:
sts = _phl_hw_trx_pause(phl_info);
break;
case PHL_DATA_CTL_SW_TX_RESUME:
sts = _phl_sw_tx_resume(phl_info, ctl);
break;
case PHL_DATA_CTL_SW_RX_RESUME:
sts = _phl_sw_rx_resume(phl_info, ctl);
break;
case PHL_DATA_CTL_SW_TX_PAUSE:
sts = _phl_sw_tx_pause(phl_info, ctl, false);
break;
case PHL_DATA_CTL_SW_RX_PAUSE:
sts = _phl_sw_rx_pause(phl_info, ctl, false);
break;
case PHL_DATA_CTL_SW_TX_RESET:
_phl_sw_tx_rst(phl_info);
sts = RTW_PHL_STATUS_SUCCESS;
break;
case PHL_DATA_CTL_SW_RX_RESET:
_phl_sw_rx_rst(phl_info);
sts = RTW_PHL_STATUS_SUCCESS;
break;
case PHL_DATA_CTL_TRX_SW_PAUSE:
sts = _phl_trx_sw_pause(phl_info, ctl);
break;
case PHL_DATA_CTL_TRX_SW_RESUME:
sts = _phl_trx_sw_resume(phl_info, ctl);
break;
case PHL_DATA_CTL_TRX_PAUSE_W_RST:
sts = _phl_trx_pause_w_rst(phl_info, ctl, msg);
break;
case PHL_DATA_CTL_TRX_RESUME_W_RST:
sts = _phl_trx_resume_w_rst(phl_info, ctl, msg);
break;
default:
PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
"Unknown data control command(%d)!\n", ctl->cmd);
break;
}
return sts;
}
|
2301_81045437/rtl8852be
|
phl/phl_tx.c
|
C
|
agpl-3.0
| 62,651
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef __PHL_TX_H_
#define __PHL_TX_H_
void phl_datapath_deinit(struct phl_info_t *phl_info);
enum rtw_phl_status phl_datapath_init(struct phl_info_t *phl_info);
enum rtw_phl_status phl_datapath_start(struct phl_info_t *phl_info);
void phl_datapath_stop(struct phl_info_t *phl_info);
void phl_trx_free_handler(void *phl);
void phl_trx_free_sw_rsc(void *phl);
bool phl_datapath_chk_trx_pause(struct phl_info_t *phl_info, u8 type);
struct phl_ring_status *phl_alloc_ring_sts(struct phl_info_t *phl_info);
void phl_release_ring_sts(struct phl_info_t *phl_info,
struct phl_ring_status *ring_sts);
u8 phl_check_xmit_ring_resource(struct phl_info_t *phl_info,
_os_list *sta_list);
void phl_tx_flow_ctrl(struct phl_info_t *phl_info, _os_list *sta_list);
enum rtw_phl_status rtw_phl_tx_req_notify(void *phl);
enum rtw_phl_status phl_register_handler(struct rtw_phl_com_t *phl_com,
struct rtw_phl_handler *handler);
enum rtw_phl_status phl_deregister_handler(struct rtw_phl_com_t *phl_com,
struct rtw_phl_handler *handler);
enum rtw_phl_status phl_schedule_handler(struct rtw_phl_com_t *phl_com,
struct rtw_phl_handler *handler);
enum rtw_phl_status phl_indic_pkt_complete(void *phl);
enum rtw_phl_status phl_register_tx_ring(void *phl, u16 macid, u8 hw_band, u8 hw_wmm, u8 hw_port);
enum rtw_phl_status phl_deregister_tx_ring(void *phl, u16 macid);
void phl_free_deferred_tx_ring(struct phl_info_t *phl_info);
enum rtw_phl_status phl_enqueue_busy_h2c_pkt(struct phl_info_t *phl_info,
struct rtw_h2c_pkt *h2c_pkt, u8 pos);
enum rtw_phl_status phl_enqueue_idle_h2c_pkt(struct phl_info_t *phl_info,
struct rtw_h2c_pkt *h2c_pkt);
struct rtw_h2c_pkt *phl_query_busy_h2c_pkt(struct phl_info_t *phl_info);
struct rtw_h2c_pkt *phl_query_idle_h2c_pkt(struct phl_info_t *phl_info, u8 type);
/**
 * This function is used by the read / write pointer mechanism and
 * returns the number of entries available for reading
 * @rptr: input, the read pointer
 * @wptr: input, the write pointer
 * @bndy: input, the boundary of the read / write pointer mechanism
 */
u16 phl_calc_avail_rptr(u16 rptr, u16 wptr, u16 bndy);
/**
 * This function is used by the read / write pointer mechanism and
 * returns the number of entries available for writing
 * @rptr: input, the read pointer
 * @wptr: input, the write pointer
 * @bndy: input, the boundary of the read / write pointer mechanism
 */
u16 phl_calc_avail_wptr(u16 rptr, u16 wptr, u16 bndy);
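/*
* Usage sketch (illustrative values only): with bndy = 256, rptr = 10 and
* wptr = 250, phl_calc_avail_rptr() reports how many filled entries the
* consumer may still read, while phl_calc_avail_wptr() reports how many free
* slots the producer may still fill before catching up with the read pointer.
*/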
void phl_dump_sorted_ring(_os_list *sorted_ring);
void phl_dump_tx_plan(_os_list *sta_list);
void phl_dump_t_fctrl_result(_os_list *t_fctrl_result);
const char *phl_tfc_lvl_to_str(u8 lvl);
void phl_tx_traffic_upd(struct rtw_stats *sts);
void phl_tx_watchdog(struct phl_info_t *phl_info);
void phl_reset_tx_stats(struct rtw_stats *stats);
void phl_dump_h2c_pool_stats(struct phl_h2c_pkt_pool *h2c_pkt_pool);
enum rtw_phl_status
phl_cmd_cfg_ampdu_hdl(struct phl_info_t *phl_info, u8 *param);
enum rtw_phl_status
phl_data_ctrler(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl,
struct phl_msg *msg);
void rtw_phl_tx_stop(void *phl);
void rtw_phl_tx_resume(void *phl);
#endif /* __PHL_TX_H_ */
|
2301_81045437/rtl8852be
|
phl/phl_tx.h
|
C
|
agpl-3.0
| 3,789
|
/******************************************************************************
*
* Copyright(c) 2021 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_TXPWR_C_
#include "phl_headers.h"
const char *rtw_phl_get_pw_lmt_regu_type_str(void *phl, enum band_type band)
{
struct phl_info_t *phl_info = phl;
return rtw_hal_get_pw_lmt_regu_type_str(phl_info->hal, band);
}
bool rtw_phl_get_pwr_lmt_en(void *phl, u8 band_idx)
{
struct phl_info_t *phl_info = phl;
return rtw_hal_get_pwr_lmt_en(phl_info->hal, band_idx);
}
enum rtw_phl_status rtw_phl_set_tx_power(void *phl, u8 band_idx)
{
struct phl_info_t *phl_info = phl;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
hstatus = rtw_hal_set_tx_power(phl_info->hal, band_idx, PWR_BY_RATE | PWR_LIMIT | PWR_LIMIT_RU);
if (hstatus != RTW_HAL_STATUS_SUCCESS)
PHL_ERR("%s rtw_hal_set_tx_power: statuts = %u\n", __func__, hstatus);
return hstatus == RTW_HAL_STATUS_SUCCESS ? RTW_PHL_STATUS_SUCCESS : RTW_PHL_STATUS_FAILURE;
}
|
2301_81045437/rtl8852be
|
phl/phl_txpwr.c
|
C
|
agpl-3.0
| 1,497
|
/******************************************************************************
*
* Copyright(c) 2021 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_TXPWR_H_
#define _PHL_TXPWR_H_
const char *rtw_phl_get_pw_lmt_regu_type_str(void *phl, enum band_type band);
bool rtw_phl_get_pwr_lmt_en(void *phl, u8 band_idx);
enum rtw_phl_status rtw_phl_set_tx_power(void *phl, u8 band_idx);
#endif /*_PHL_TXPWR_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_txpwr.h
|
C
|
agpl-3.0
| 919
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_TYPES_H_
#define _PHL_TYPES_H_
#define _ALIGN(_len, _align) (((_len) + (_align) - 1) & ~(_align - 1))
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
enum lock_type {
_ps,
_bh,
_irq
};
#ifndef PHL_PLATFORM_LINUX
#ifndef inline
#define inline __inline
#endif
#ifndef NULL
#define NULL ((void *)0)
#endif
#ifndef __cplusplus
typedef unsigned char bool;
#endif
#ifndef false
#define false 0
#endif
#ifndef true
#define true 1
#endif
#define RTW_PRINT_SEL(x,...) do {} while (0)
#ifndef BIT
#define BIT(x) (1UL << (x))
#endif
#define _FAIL 0
#define _SUCCESS 1
#define BUG_ON
#define PCI_DMA_TODEVICE 0
#define PCI_DMA_FROMDEVICE 1
#endif /*#ifndef PHL_PLATFORM_LINUX*/
#ifdef PHL_PLATFORM_WINDOWS
#define MAC_ALEN 6
#define _dma unsigned int
#define _os_timer RT_TIMER
#define _os_lock RT_SPIN_LOCK
#define _os_mutex PlatformMutex
#define _os_sema PlatformSemaphore
#define _os_event PlatformEvent
#define _os_list struct list_head
#define _os_atomic volatile long
#define _os_dbgdump DbgPrint
#define KERN_CONT
#define _os_assert ASSERT
#define _os_warn_on
/*#define _os_completion unsigned long*/
#define _os_tasklet RT_THREAD
#define _os_thread RT_THREAD
#define _os_spinlockfg unsigned int
#define _os_workitem RT_WORK_ITEM
#define _os_path_sep "\\"
#define HAL_FILE_CONFIG_PATH ""
#define FW_FILE_CONFIG_PATH ""
#define PLATFOM_IS_LITTLE_ENDIAN 1
#elif defined(PHL_PLATFORM_LINUX)
typedef struct rtw_timer_list _os_timer;
#define _os_lock _lock
#define _os_mutex _mutex
#define _os_sema _sema
#define _os_event struct completion
#define _os_list _list
#define _os_atomic ATOMIC_T
#define MAC_ALEN ETH_ALEN
#define _os_dbgdump _dbgdump
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define KERN_CONT
#endif
#define _os_assert(_expr) 0/*rtw_bug_on(_expr)*/
#define _os_warn_on(_cond) rtw_warn_on(_cond)
#define _dma dma_addr_t
#define _os_tasklet _tasklet
#define _os_thread struct thread_hdl
#ifdef CONFIG_PHL_CPU_BALANCE
#define _os_workitem _workitem_cpu
#else
#define _os_workitem _workitem
#endif
#define _os_spinlockfg unsigned long
#define _os_path_sep "/"
#ifndef REALTEK_CONFIG_PATH
#define REALTEK_CONFIG_PATH ""
#endif
#define HAL_FILE_CONFIG_PATH REALTEK_CONFIG_PATH
#ifndef CONFIG_FIRMWARE_PATH
#define CONFIG_FIRMWARE_PATH ""
#endif
#define FW_FILE_CONFIG_PATH CONFIG_FIRMWARE_PATH
#ifdef CONFIG_LITTLE_ENDIAN
#define PLATFOM_IS_LITTLE_ENDIAN 1
#else
#define PLATFOM_IS_LITTLE_ENDIAN 0
#endif
#else
#ifdef _WIN64
typedef unsigned long long size_t;
#else
typedef unsigned long size_t;
#endif
#define u8 unsigned char
#define s8 char
#define u16 unsigned short
#define s16 short
#define u32 unsigned int
#define s32 int
#define u64 unsigned long long
#define s64 long long
#define MAC_ALEN 6
/* keep define names; delete once osdep is ready */
#define _dma unsigned long
#define _os_timer unsigned long
#define _os_lock unsigned long
#define _os_mutex unsigned long
#define _os_sema unsigned long
#define _os_event unsigned long
#define _os_list struct list_head
#define _os_atomic int
#define _os_dbgdump(_fmt, ...)
#define KERN_CONT
#define _os_assert(_expr)
#define _os_warn_on(_cond)
#define _os_spinlockfg unsigned int
#define _os_tasklet unsigned long
#define _os_thread unsigned long
#define _os_workitem unsigned long
#define _os_path_sep "/"
#define HAL_FILE_CONFIG_PATH ""
#define FW_FILE_CONFIG_PATH ""
#define PLATFOM_IS_LITTLE_ENDIAN 1
#endif
struct _os_handler {
union {
_os_tasklet tasklet;
_os_workitem workitem;
_os_thread thread;
} u;
};
#ifndef PHL_PLATFORM_LINUX
#define SWAP32(x) \
((u32)((((u32)(x) & (u32)0x000000ff) << 24) | \
(((u32)(x) & (u32)0x0000ff00) << 8) | \
(((u32)(x) & (u32)0x00ff0000) >> 8) | \
(((u32)(x) & (u32)0xff000000) >> 24)))
#define SWAP16(x) \
((u16)((((u16)(x) & (u16)0x00ff) << 8) | \
(((u16)(x) & (u16)0xff00) >> 8)))
/* 1: the platform is Little Endian. */
/* 0: the platform is Big Endian. */
#if PLATFOM_IS_LITTLE_ENDIAN
#define cpu_to_le32(x) ((u32)(x))
#define le32_to_cpu(x) ((u32)(x))
#define cpu_to_le16(x) ((u16)(x))
#define le16_to_cpu(x) ((u16)(x))
#define cpu_to_be32(x) SWAP32((x))
#define be32_to_cpu(x) SWAP32((x))
#define cpu_to_be16(x) SWAP16((x))
#define be16_to_cpu(x) SWAP16((x))
#else
#define cpu_to_le32(x) SWAP32((x))
#define le32_to_cpu(x) SWAP32((x))
#define cpu_to_le16(x) SWAP16((x))
#define le16_to_cpu(x) SWAP16((x))
#define cpu_to_be32(x) ((u32)(x))
#define be32_to_cpu(x) ((u32)(x))
#define cpu_to_be16(x) ((u16)(x))
#define be16_to_cpu(x) ((u16)(x))
#endif /*PLATFOM_IS_LITTLE_ENDIAN*/
typedef u16 __le16;
typedef u32 __le32;
typedef u16 __be16;
typedef u32 __be32;
#endif /*#ifndef PHL_PLATFORM_LINUX*/
#endif /*_PHL_TYPES_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_types.h
|
C
|
agpl-3.0
| 5,962
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#include "phl_headers.h"
#define _PHL_UTIL_C_
/* phl queue general API */
void pq_init(void *d, struct phl_queue *q)
{
INIT_LIST_HEAD(&q->queue);
_os_spinlock_init(d, &q->lock);
q->cnt = 0;
}
void pq_deinit(void *d, struct phl_queue *q)
{
_os_spinlock_free(d, &q->lock);
}
void pq_reset(void *d, struct phl_queue *q, enum lock_type type)
{
_os_spinlockfg sp_flags;
_os_spinlock(d, &q->lock, type, &sp_flags);
INIT_LIST_HEAD(&q->queue);
q->cnt = 0;
_os_spinunlock(d, &q->lock, type, &sp_flags);
}
u8 pq_push(void *d, struct phl_queue *q, _os_list *obj, u8 pos, enum lock_type type)
{
_os_spinlockfg sp_flags;
_os_spinlock(d, &q->lock, type, &sp_flags);
if(pos == _first)
list_add(obj, &q->queue);
else
list_add_tail(obj, &q->queue);
q->cnt++;
_os_spinunlock(d, &q->lock, type, &sp_flags);
return true;
}
u8 pq_pop(void *d, struct phl_queue *q, _os_list **obj, u8 pos, enum lock_type type)
{
_os_spinlockfg sp_flags = 0;
(*obj) = NULL;
_os_spinlock(d, &q->lock, type, &sp_flags);
if(!list_empty(&q->queue) && (q->cnt > 0)) {
if(pos == _first)
(*obj) = _get_next(&q->queue);
else
(*obj) = _get_prev(&q->queue);
list_del(*obj);
q->cnt--;
}
_os_spinunlock(d, &q->lock, type, &sp_flags);
return ((*obj) == NULL || (*obj) == &q->queue) ? (false) : (true);
}
u8 pq_get_front(void *d, struct phl_queue *q, _os_list **obj, enum lock_type type)
{
_os_spinlockfg sp_flags = 0;
(*obj) = NULL;
_os_spinlock(d, &q->lock, type, &sp_flags);
if(!list_empty(&q->queue) && (q->cnt > 0))
(*obj) = q->queue.next;
_os_spinunlock(d, &q->lock, type, &sp_flags);
return ((*obj) == NULL || (*obj) == &q->queue) ? (false) : (true);
}
u8 pq_get_next(void *d, struct phl_queue *queue, _os_list *cur_obj,
_os_list **obj, enum lock_type type)
{
_os_spinlockfg sp_flags;
(*obj) = NULL;
if(cur_obj == NULL)
return false;
_os_spinlock(d, &queue->lock, type, &sp_flags);
(*obj) = cur_obj->next;
_os_spinunlock(d, &queue->lock, type, &sp_flags);
return ((*obj) == NULL || (*obj) == &(queue->queue)) ? (false) : (true);
}
u8 pq_get_tail(void *d, struct phl_queue *q, _os_list **obj, enum lock_type type)
{
_os_spinlockfg sp_flags = 0;
(*obj) = NULL;
_os_spinlock(d, &q->lock, type, &sp_flags);
if(!list_empty(&q->queue) && (q->cnt > 0))
(*obj) = q->queue.prev;
_os_spinunlock(d, &q->lock, type, &sp_flags);
return ((*obj) == NULL || (*obj) == &q->queue) ? (false) : (true);
}
u8 pq_get_prev(void *d, struct phl_queue *queue, _os_list *cur_obj,
_os_list **obj, enum lock_type type)
{
_os_spinlockfg sp_flags;
(*obj) = NULL;
if(cur_obj == NULL)
return false;
_os_spinlock(d, &queue->lock, type, &sp_flags);
(*obj) = cur_obj->prev;
_os_spinunlock(d, &queue->lock, type, &sp_flags);
return ((*obj) == NULL || (*obj) == &(queue->queue)) ? (false) : (true);
}
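/*
* Walk the queue under its lock and return the first node for which
* search_fun() reports a hit; when bdel is true the matched node is also
* unlinked from the queue before the lock is released.
*/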
u8 pq_search_node(void *d, struct phl_queue *q, _os_list **obj,
enum lock_type type, bool bdel, void *priv,
u8 (*search_fun)(void *d, void *obj, void *priv))
{
_os_spinlockfg sp_flags = 0;
_os_list *newobj = NULL;
bool bhit = false;
(*obj) = NULL;
_os_spinlock(d, &q->lock, type, &sp_flags);
if(!list_empty(&q->queue) && (q->cnt > 0))
newobj = _get_next(&q->queue);
while(newobj && (newobj != &(q->queue))) {
if(search_fun)
bhit = search_fun(d, newobj, priv);
if(bhit && bdel) {
list_del(newobj);
q->cnt--;
}
if(bhit) {
(*obj) = newobj;
break;
}
newobj = newobj->next;
}
_os_spinunlock(d, &q->lock, type, &sp_flags);
return ((*obj) == NULL || (*obj) == &(q->queue)) ? (false) : (true);
}
void pq_del_node(void *d, struct phl_queue *q, _os_list *obj, enum lock_type type)
{
_os_spinlockfg sp_flags;
if(obj == NULL)
return;
_os_spinlock(d, &q->lock, type, &sp_flags);
list_del(obj);
q->cnt--;
_os_spinunlock(d, &q->lock, type, &sp_flags);
}
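/*
* Ordered insertion: walk the queue under its lock until pq_predicate()
* returns true, then insert the input node in front of that position (or at
* the tail if no predicate hit is found).
*/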
u8 pq_insert(void *d, struct phl_queue *q, enum lock_type type, void *priv, _os_list *input,
u8 (*pq_predicate)(void *d, void *priv,_os_list *input, _os_list *obj))
{
_os_spinlockfg sp_flags;
_os_list *obj = NULL;
_os_spinlock(d, &q->lock, type, &sp_flags);
obj = q->queue.next;
while (obj != &(q->queue)) {
if (pq_predicate && (pq_predicate(d, priv, input, obj) == true))
break;
obj = obj->next;
}
list_add_tail(input, obj);
q->cnt++;
_os_spinunlock(d, &q->lock, type, &sp_flags);
return true;
}
u32 phl_get_passing_time_us(u32 start)
{
u32 now = _os_get_cur_time_us();
u32 pass = 0;
if (now == start)
pass = 0;
else if (now > start)
/* -- start -- now -- */
pass = now - start;
else
/* -- now -- start -- */
pass = 0xffffffff - start + now;
return pass;
}
u32 phl_get_passing_time_ms(u32 start)
{
u32 now = _os_get_cur_time_ms();
u32 pass = 0;
if (now == start)
pass = 0;
else if (now > start)
/* -- start -- now -- */
pass = now - start;
else
/* -- now -- start -- */
pass = 0xffffffff - start + now;
return pass;
}
|
2301_81045437/rtl8852be
|
phl/phl_util.c
|
C
|
agpl-3.0
| 5,561
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_UTIL_H_
#define _PHL_UTIL_H_
#define phlcom_to_drvpriv(_pcom) (_pcom->drv_priv)
#define phl_is_mp_mode(_phl_com) (_phl_com->drv_mode >= RTW_DRV_MODE_MP_SMDL_START && _phl_com->drv_mode <= RTW_DRV_MODE_MP_SMDL_END)
#ifndef is_broadcast_mac_addr
#define is_broadcast_mac_addr(addr) ((((addr[0]) & 0xff) == 0xff) && (((addr[1]) & 0xff) == 0xff) && \
(((addr[2]) & 0xff) == 0xff) && (((addr[3]) & 0xff) == 0xff) && (((addr[4]) & 0xff) == 0xff) && \
(((addr[5]) & 0xff) == 0xff))
#endif
#ifndef MIN
#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
#endif
#ifndef DIFF
#define DIFF(_x_, _y_) ((_x_ >= _y_) ? (_x_ - _y_) : (_y_ - _x_))
#endif
#define SET_STATUS_FLAG(_status,_flags) \
((_status) |= (_flags))
#define TEST_STATUS_FLAG(_status,_flags)\
(((_status) & (_flags))==(_flags))
#define CLEAR_STATUS_FLAG(_status,_flags)\
((_status) &= ~(_flags))
static inline void _add_bitmap_bit(u8 *bitmap, u8 *arr, u32 len)
{
u32 k = 0;
for(k = 0; k < (len); k++)
bitmap[arr[k] / 8] |= (BIT0 << (arr[k] % 8));
}
static inline void _clr_bitmap_bit(u8 *bitmap, u8 *arr, u32 len)
{
u32 k = 0;
for(k = 0; k < (len); k++)
bitmap[arr[k] / 8] &= ~(BIT0 << (arr[k] % 8));
}
#define _chk_bitmap_bit(_bitmap, _id) \
((_bitmap)[(_id) / 8] & (BIT0 << ((_id) % 8)))
#define _reset_bitmap(_d, _bitmap ,_len) _os_mem_set(_d, _bitmap, 0, _len)
static inline void _and_bitmaps( u8* ref_bitmap, u8* _bitmap, u32 len)
{
u32 k = 0;
for(k = 0; k < len; k++)
_bitmap[k] &= ref_bitmap[k];
}
/*phl_queue*/
struct phl_queue {
_os_list queue;
_os_lock lock;
int cnt;
};
static inline _os_list *_get_next(_os_list *list)
{
return list->next;
}
static inline _os_list *_get_prev(_os_list *list)
{
return list->prev;
}
static inline _os_list *_get_list_head(struct phl_queue *q)
{
return (&q->queue);
}
void pq_init(void *d, struct phl_queue *q);
void pq_deinit(void *d, struct phl_queue *q);
void pq_reset(void *d, struct phl_queue *q, enum lock_type type);
u8 pq_push(void *d, struct phl_queue *q, _os_list *obj, u8 pos, enum lock_type type);
u8 pq_pop(void *d, struct phl_queue *q, _os_list **obj, u8 pos, enum lock_type type);
u8 pq_get_next(void *d, struct phl_queue *queue, _os_list *cur_obj,
_os_list **obj, enum lock_type type);
u8 pq_get_front(void *d, struct phl_queue *queue, _os_list **obj,
enum lock_type type);
u8 pq_get_tail(void *d, struct phl_queue *q, _os_list **obj, enum lock_type type);
u8 pq_get_prev(void *d, struct phl_queue *queue, _os_list *cur_obj,
_os_list **obj, enum lock_type type);
void pq_del_node(void *d, struct phl_queue *q, _os_list *obj, enum lock_type type);
u8 pq_search_node(void *d, struct phl_queue *q, _os_list **obj,
enum lock_type type, bool bdel, void *priv,
u8 (*search_fun)(void *d, void *obj, void *priv));
u8 pq_insert(void *d, struct phl_queue *q, enum lock_type type, void *priv, _os_list *input,
u8 (*pq_predicate)(void *d, void *priv,_os_list *input, _os_list *obj));
u32 phl_get_passing_time_us(u32 start);
u32 phl_get_passing_time_ms(u32 start);
#endif /*_PHL_UTIL_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_util.h
|
C
|
agpl-3.0
| 3,722
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_VERSION_H_
#define _PHL_VERSION_H_
/*major-minor-hotfix-[branchid + hotfix]*/
#define PHL_VERSION(a, b, c, d) (((a) << 48) + ((b) << 32) + ((c) << 16) + (d))
#define PHL_MAJOR_VER 1
#define PHL_MINOR_VER 15
#define PHL_HOTFIX_VER 28
#define PHL_BRANCH_IDX 100
#define PHL_VER_CODE PHL_VERSION(PHL_MAJOR_VER, PHL_MINOR_VER, PHL_HOTFIX_VER, PHL_BRANCH_IDX)
#endif /*_PHL_VERSION_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_version.h
|
C
|
agpl-3.0
| 1,056
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_WATCHDOG_C_
#include "phl_headers.h"
#ifdef CONFIG_FSM
static void _phl_datapath_watchdog(struct phl_info_t *phl_info)
{
phl_tx_watchdog(phl_info);
phl_rx_watchdog(phl_info);
phl_sta_trx_tfc_upd(phl_info);
}
void rtw_phl_watchdog_callback(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
do {
_phl_datapath_watchdog(phl_info);
#ifdef CONFIG_PCI_HCI
#ifdef RTW_WKARD_DYNAMIC_LTR
phl_ltr_ctrl_watchdog(phl_info);
#endif
#ifdef PCIE_TRX_MIT_EN
phl_pcie_trx_mit_watchdog(phl_info);
#endif
#endif
phl_mr_watchdog(phl_info);
rtw_hal_watchdog(phl_info->hal);
} while (false);
}
#endif
static void _phl_watchdog_sw(struct phl_info_t *phl)
{
/* Only sw statistics or sw behavior or trigger FG cmd */
phl_tx_watchdog(phl);
phl_rx_watchdog(phl);
phl_sta_trx_tfc_upd(phl);
}
static void _phl_watchdog_hw(struct phl_info_t *phl)
{
#ifdef CONFIG_PHL_THERMAL_PROTECT
phl_thermal_protect_watchdog(phl);
#endif
/* I/O, tx behavior, request power, ... */
#ifdef CONFIG_PCI_HCI
#ifdef RTW_WKARD_DYNAMIC_LTR
phl_ltr_ctrl_watchdog(phl);
#endif
#ifdef PCIE_TRX_MIT_EN
phl_pcie_trx_mit_watchdog(phl);
#endif
#endif
phl_mr_watchdog(phl);
rtw_hal_watchdog(phl->hal);
phl_bcn_watchdog(phl);
}
#ifdef CONFIG_CMD_DISP
static void _phl_watchdog_done(void *drv_priv, u8 *cmd, u32 cmd_len, enum rtw_phl_status status)
{
struct phl_info_t *phl_info = (struct phl_info_t *)cmd;
struct phl_watchdog *wdog = &(phl_info->wdog);
_os_set_timer(drv_priv,
&wdog->wdog_timer,
wdog->period);
}
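/*
* Issue the watchdog as a command: PHL_CMD_DIRECTLY runs the handler in the
* caller's context, otherwise MSG_EVT_WATCHDOG is enqueued to the command
* dispatcher on HW band 0 and the timer is re-armed from the done callback.
*/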
static enum rtw_phl_status
_phl_watchdog_cmd(struct phl_info_t *phl_info,
enum phl_cmd_type cmd_type,
u32 cmd_timeout)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
if (cmd_type == PHL_CMD_DIRECTLY) {
phl_status = phl_watchdog_cmd_hdl(phl_info, RTW_PHL_STATUS_SUCCESS);
goto _exit;
}
/* watchdog doesn't care about hw_band */
phl_status = phl_cmd_enqueue(phl_info,
HW_BAND_0,
MSG_EVT_WATCHDOG,
(u8 *)phl_info,
0,
_phl_watchdog_done,
cmd_type,
cmd_timeout);
if (is_cmd_failure(phl_status)) {
/* Send cmd success, but wait cmd fail*/
phl_status = RTW_PHL_STATUS_FAILURE;
} else if (phl_status != RTW_PHL_STATUS_SUCCESS) {
/* Send cmd fail */
phl_status = RTW_PHL_STATUS_FAILURE;
}
_exit:
return phl_status;
}
#endif
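/*
* Watchdog timer callback: run the SW-only watchdog work in timer context,
* then request the HW watchdog through a command so I/O happens with the
* proper privilege. The timer is re-armed here only when sending the command
* fails (or is skipped); otherwise the command done callback re-arms it.
*/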
static void _phl_watchdog_timer_expired(void *context)
{
struct phl_info_t *phl_info = (struct phl_info_t *)context;
struct phl_watchdog *wdog = &(phl_info->wdog);
enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
#ifdef CONFIG_CMD_DISP
/* phl sw watchdog */
_phl_watchdog_sw(phl_info);
/* core sw watchdog */
if (NULL != wdog->core_sw_wdog)
wdog->core_sw_wdog(phl_to_drvpriv(phl_info));
/* check if there is FG cmd on hw_band 0/1 */
if (true == phl_disp_eng_is_fg_empty(phl_info, HW_BAND_MAX)) {
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s: trigger watchdog(period %d)\n",
__FUNCTION__, wdog->period);
/* send watchdog cmd to request privilege of I/O */
psts = _phl_watchdog_cmd(phl_info, PHL_CMD_NO_WAIT, 0);
} else {
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s: skip watchdog\n",
__FUNCTION__);
}
if (psts == RTW_PHL_STATUS_FAILURE) {
_os_set_timer(phl_to_drvpriv(phl_info),
&wdog->wdog_timer,
wdog->period);
}
#else
PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "%s: Not support watchdog\n", __FUNCTION__);
#endif
}
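/*
* Command handler for MSG_EVT_WATCHDOG: performs the HW-touching part of the
* watchdog (and the core HW hook) unless the command itself already failed.
*/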
enum rtw_phl_status
phl_watchdog_cmd_hdl(struct phl_info_t *phl_info, enum rtw_phl_status psts)
{
struct phl_watchdog *wdog = &(phl_info->wdog);
if (false == is_cmd_failure(psts)) {
_phl_watchdog_hw(phl_info);
if (NULL != wdog->core_hw_wdog)
wdog->core_hw_wdog(phl_to_drvpriv(phl_info));
}
return RTW_PHL_STATUS_SUCCESS;
}
void rtw_phl_watchdog_init(void *phl,
u16 period,
void (*core_sw_wdog)(void *drv_priv),
void (*core_hw_wdog)(void *drv_priv))
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_watchdog *wdog = &(phl_info->wdog);
wdog->core_sw_wdog = core_sw_wdog;
wdog->core_hw_wdog = core_hw_wdog;
if (period > 0)
wdog->period = period;
else
wdog->period = WDOG_PERIOD;
_os_init_timer(phl_to_drvpriv(phl_info),
&wdog->wdog_timer,
_phl_watchdog_timer_expired,
phl,
"phl_watchdog_timer");
}
void rtw_phl_watchdog_deinit(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_watchdog *wdog = &(phl_info->wdog);
_os_release_timer(phl_to_drvpriv(phl_info), &wdog->wdog_timer);
}
void rtw_phl_watchdog_start(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_watchdog *wdog = &(phl_info->wdog);
PHL_INFO("%s\n", __func__);
_os_set_timer(phl_to_drvpriv(phl_info),
&wdog->wdog_timer,
wdog->period);
}
void rtw_phl_watchdog_stop(void *phl)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_watchdog *wdog = &(phl_info->wdog);
PHL_INFO("%s\n", __func__);
_os_cancel_timer(phl_to_drvpriv(phl_info), &wdog->wdog_timer);
}
|
2301_81045437/rtl8852be
|
phl/phl_watchdog.c
|
C
|
agpl-3.0
| 6,038
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_WATCHDOG_H_
#define _PHL_WATCHDOG_H_
#define WDOG_PERIOD 2000
struct phl_watchdog {
_os_timer wdog_timer;
/* Only sw statistics or sw behavior or trigger FG cmd */
void (*core_sw_wdog)(void *drv_priv);
/* I/O, tx behavior, request power, ... */
void (*core_hw_wdog)(void *drv_priv);
u16 period;
};
enum rtw_phl_status
phl_watchdog_cmd_hdl(struct phl_info_t *phl_info, enum rtw_phl_status psts);
#ifdef CONFIG_FSM
void rtw_phl_watchdog_callback(void *phl);
#endif
#endif /*_PHL_WATCHDOG_H_*/
|
2301_81045437/rtl8852be
|
phl/phl_watchdog.h
|
C
|
agpl-3.0
| 1,167
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_WOW_C_
#include "phl_headers.h"
enum rtw_phl_status phl_wow_mdl_init(struct phl_info_t* phl_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
#ifdef CONFIG_WOWLAN
struct phl_wow_info *info = phl_to_wow_info(phl_info);
info->phl_info = phl_info;
_os_spinlock_init(phl_to_drvpriv(phl_info), &info->wow_lock);
#endif /* CONFIG_WOWLAN */
return pstatus;
}
void phl_wow_mdl_deinit(struct phl_info_t* phl_info)
{
#ifdef CONFIG_WOWLAN
struct phl_wow_info *info = phl_to_wow_info(phl_info);
_os_spinlock_free(phl_to_drvpriv(phl_info), &info->wow_lock);
#endif /* CONFIG_WOWLAN */
}
#ifdef CONFIG_WOWLAN
/* TO-DO: Confirm the enum struct of the algo */
u8 _phl_query_iv_len(u8 algo)
{
u8 len = 0;
switch(algo) {
case RTW_ENC_WEP40:
len = 4;
break;
case RTW_ENC_TKIP:
case RTW_ENC_CCMP:
case RTW_ENC_GCMP256:
len = 8;
break;
default:
len = 0;
break;
}
return len;
}
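/*
* Map the cached AKM suite type to the EAPOL-Key descriptor version used by
* the GTK offload packet: TKIP always uses version 1, AKM 1/2 use version 2,
* AKM 3..6 use version 3, anything else falls back to 0.
*/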
u8 _phl_query_key_desc_ver(struct phl_wow_info *wow_info, u8 algo)
{
u8 akm_type = wow_info->gtk_ofld_info.akmtype_byte3;
if (algo == RTW_ENC_TKIP)
return EAPOLKEY_KEYDESC_VER_1;
if (akm_type == 1 || akm_type == 2) {
return EAPOLKEY_KEYDESC_VER_2;
} else if (akm_type > 2 && akm_type < 7) {
return EAPOLKEY_KEYDESC_VER_3;
} else {
return 0;
}
}
static void _phl_cfg_pkt_ofld_null_info(
struct phl_wow_info *wow_info,
struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_null_info *null_info)
{
void *drv_priv = phl_to_drvpriv(wow_info->phl_info);
_os_mem_cpy(drv_priv, &(null_info->a1[0]), &(phl_sta->mac_addr[0]), MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(null_info->a2[0]), &(phl_sta->wrole->mac_addr[0]), MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(null_info->a3[0]), &(phl_sta->mac_addr[0]), MAC_ADDRESS_LENGTH);
}
static void _phl_cfg_pkt_ofld_arp_rsp_info(struct phl_wow_info *wow_info, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_arp_rsp_info *arp_rsp_info)
{
void *drv_priv = phl_to_drvpriv(wow_info->phl_info);
u8 pairwise_algo = get_wow_pairwise_algo_type(wow_info);
_os_mem_cpy(drv_priv, &(arp_rsp_info->a1[0]), &(phl_sta->mac_addr[0]), MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(arp_rsp_info->a2[0]), &(phl_sta->wrole->mac_addr[0]), MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(arp_rsp_info->a3[0]), &(phl_sta->mac_addr[0]), MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(arp_rsp_info->host_ipv4_addr[0]),
&(wow_info->arp_ofld_info.arp_ofld_content.host_ipv4_addr[0]),
IPV4_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(arp_rsp_info->remote_ipv4_addr[0]),
&(wow_info->arp_ofld_info.arp_ofld_content.remote_ipv4_addr[0]),
IPV4_ADDRESS_LENGTH);
arp_rsp_info->sec_hdr = _phl_query_iv_len(pairwise_algo);
}
static void _phl_cfg_pkt_ofld_na_info(struct phl_wow_info *wow_info, struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_na_info *na_info)
{
void *drv_priv = phl_to_drvpriv(wow_info->phl_info);
u8 pairwise_algo = get_wow_pairwise_algo_type(wow_info);
_os_mem_cpy(drv_priv, &(na_info->a1[0]), &(phl_sta->mac_addr[0]), MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(na_info->a2[0]), &(phl_sta->wrole->mac_addr[0]), MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(na_info->a3[0]), &(phl_sta->mac_addr[0]), MAC_ADDRESS_LENGTH);
na_info->sec_hdr = _phl_query_iv_len(pairwise_algo);
}
static void _phl_cfg_pkt_ofld_eapol_key_info(
struct phl_wow_info *wow_info,
struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_eapol_key_info *eapol_key_info)
{
void *drv_priv = phl_to_drvpriv(wow_info->phl_info);
struct rtw_gtk_ofld_info *gtk_ofld_info = &wow_info->gtk_ofld_info;
u8 pairwise_algo = get_wow_pairwise_algo_type(wow_info);
_os_mem_cpy(drv_priv, &(eapol_key_info->a1[0]), &(phl_sta->mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(eapol_key_info->a2[0]), &(phl_sta->wrole->mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(eapol_key_info->a3[0]), &(phl_sta->mac_addr[0]),
MAC_ADDRESS_LENGTH);
eapol_key_info->sec_hdr = _phl_query_iv_len(pairwise_algo);
eapol_key_info->key_desc_ver = _phl_query_key_desc_ver(wow_info, pairwise_algo);
_os_mem_cpy(drv_priv, eapol_key_info->replay_cnt,
gtk_ofld_info->gtk_ofld_content.replay_cnt, 8);
}
static void _phl_cfg_pkt_ofld_sa_query_info(
struct phl_wow_info *wow_info,
struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_sa_query_info *sa_query_info)
{
void *drv_priv = phl_to_drvpriv(wow_info->phl_info);
u8 pairwise_algo = get_wow_pairwise_algo_type(wow_info);
_os_mem_cpy(drv_priv, &(sa_query_info->a1[0]), &(phl_sta->mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(sa_query_info->a2[0]), &(phl_sta->wrole->mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(sa_query_info->a3[0]), &(phl_sta->mac_addr[0]),
MAC_ADDRESS_LENGTH);
sa_query_info->sec_hdr = _phl_query_iv_len(pairwise_algo);
}
static void _phl_cfg_pkt_ofld_realwow_kapkt_info(
struct phl_wow_info *wow_info,
struct rtw_phl_stainfo_t *phl_sta,
struct rtw_pkt_ofld_realwow_kapkt_info *kapkt_info)
{
void *drv_priv = phl_to_drvpriv(wow_info->phl_info);
_os_mem_cpy(drv_priv, &(kapkt_info->keep_alive_pkt_ptrn[0]),
&(wow_info->realwow_info.realwow_ofld_content.keep_alive_pkt_ptrn[0]),
wow_info->realwow_info.realwow_ofld_content.keep_alive_pkt_size);
kapkt_info->keep_alive_pkt_size =
wow_info->realwow_info.realwow_ofld_content.keep_alive_pkt_size;
}
static void _phl_cfg_pkt_ofld_realwow_ack_info(
struct phl_wow_info *wow_info,
struct rtw_pkt_ofld_realwow_ack_info *ack_info)
{
void *drv_priv = phl_to_drvpriv(wow_info->phl_info);
_os_mem_cpy(drv_priv, &(ack_info->ack_ptrn[0]),
&(wow_info->realwow_info.realwow_ofld_content.ack_ptrn[0]),
wow_info->realwow_info.realwow_ofld_content.ack_ptrn_size);
ack_info->ack_ptrn_size = wow_info->realwow_info.realwow_ofld_content.ack_ptrn_size;
}
static void _phl_cfg_pkt_ofld_realwow_wp_info(
struct phl_wow_info *wow_info,
struct rtw_pkt_ofld_realwow_wp_info *wp_info)
{
void *drv_priv = phl_to_drvpriv(wow_info->phl_info);
_os_mem_cpy(drv_priv, &(wp_info->wakeup_ptrn[0]),
&(wow_info->realwow_info.realwow_ofld_content.wakeup_ptrn[0]),
wow_info->realwow_info.realwow_ofld_content.wakeup_ptrn_size);
wp_info->wakeup_ptrn_size = wow_info->realwow_info.realwow_ofld_content.wakeup_ptrn_size;
}
enum rtw_phl_status rtw_phl_cfg_keep_alive_info(void *phl, struct rtw_keep_alive_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_keep_alive_info *keep_alive_info = &wow_info->keep_alive_info;
FUNCIN();
keep_alive_info->keep_alive_en = info->keep_alive_en;
keep_alive_info->keep_alive_period = info->keep_alive_period;
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] keep_alive_en %d\n", keep_alive_info->keep_alive_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] keep_alive_period %d\n", keep_alive_info->keep_alive_period);
return phl_status;
}
enum rtw_phl_status rtw_phl_cfg_disc_det_info(void *phl, struct rtw_disc_det_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_disc_det_info *disc_det_info = &wow_info->disc_det_info;
FUNCIN();
disc_det_info->disc_det_en = info->disc_det_en;
disc_det_info->disc_wake_en = info->disc_wake_en;
disc_det_info->try_pkt_count = info->try_pkt_count;
disc_det_info->check_period = info->check_period;
disc_det_info->cnt_bcn_lost_en = info->cnt_bcn_lost_en;
disc_det_info->cnt_bcn_lost_limit = info->cnt_bcn_lost_limit;
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] disc_det_en %d\n", disc_det_info->disc_det_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] disc_wake_en %d\n", disc_det_info->disc_wake_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] try_pkt_count %d\n", disc_det_info->try_pkt_count);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] check_period %d\n", disc_det_info->check_period);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] cnt_bcn_lost_en %d\n", disc_det_info->cnt_bcn_lost_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] cnt_bcn_lost_limit %d\n", disc_det_info->cnt_bcn_lost_limit);
return phl_status;
}
enum rtw_phl_status rtw_phl_cfg_nlo_info(void *phl, struct rtw_nlo_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_nlo_info *nlo_info = &wow_info->nlo_info;
nlo_info->nlo_en = info->nlo_en;
return phl_status;
}
void rtw_phl_cfg_arp_ofld_info(void *phl, struct rtw_arp_ofld_info *info)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_arp_ofld_info *arp_ofld_info = &wow_info->arp_ofld_info;
void *drv_priv = phl_to_drvpriv(phl_info);
FUNCIN();
arp_ofld_info->arp_en = info->arp_en;
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] arp_en %u\n",
arp_ofld_info->arp_en);
/* If not enabled, the following actions are not necessary */
if (false == arp_ofld_info->arp_en)
return;
arp_ofld_info->arp_action = info->arp_action;
_os_mem_cpy(drv_priv,
&(arp_ofld_info->arp_ofld_content.remote_ipv4_addr[0]),
&(info->arp_ofld_content.remote_ipv4_addr[0]),
IPV4_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv,
&(arp_ofld_info->arp_ofld_content.host_ipv4_addr[0]),
&(info->arp_ofld_content.host_ipv4_addr[0]),
IPV4_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv,
&(arp_ofld_info->arp_ofld_content.mac_addr[0]),
&(info->arp_ofld_content.mac_addr[0]),
MAC_ADDRESS_LENGTH);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] arp_action %u\n",
arp_ofld_info->arp_action);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] arp_remote_ipv4 %u:%u:%u:%u\n",
arp_ofld_info->arp_ofld_content.remote_ipv4_addr[0],
arp_ofld_info->arp_ofld_content.remote_ipv4_addr[1],
arp_ofld_info->arp_ofld_content.remote_ipv4_addr[2],
arp_ofld_info->arp_ofld_content.remote_ipv4_addr[3]);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] arp_host_ipv4 %u:%u:%u:%u\n",
arp_ofld_info->arp_ofld_content.host_ipv4_addr[0],
arp_ofld_info->arp_ofld_content.host_ipv4_addr[1],
arp_ofld_info->arp_ofld_content.host_ipv4_addr[2],
arp_ofld_info->arp_ofld_content.host_ipv4_addr[3]);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] arp_mac_addr %02x:%02x:%02x:%02x:%02x:%02x \n",
arp_ofld_info->arp_ofld_content.mac_addr[0],
arp_ofld_info->arp_ofld_content.mac_addr[1],
arp_ofld_info->arp_ofld_content.mac_addr[2],
arp_ofld_info->arp_ofld_content.mac_addr[3],
arp_ofld_info->arp_ofld_content.mac_addr[4],
arp_ofld_info->arp_ofld_content.mac_addr[5]);
}
void rtw_phl_cfg_ndp_ofld_info(void *phl, struct rtw_ndp_ofld_info *info)
{
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_ndp_ofld_info *ndp_ofld_info = &wow_info->ndp_ofld_info;
struct rtw_ndp_ofld_content *pcontent;
void *drv_priv = phl_to_drvpriv(phl_info);
u8 idx = 0;
FUNCIN();
ndp_ofld_info->ndp_en = info->ndp_en;
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] ndp_en %u\n",
ndp_ofld_info->ndp_en);
/* If not enabled, the following actions are not necessary */
if (false == ndp_ofld_info->ndp_en)
return;
for (idx = 0; idx < 2; idx++) {
pcontent = &ndp_ofld_info->ndp_ofld_content[idx];
pcontent->ndp_en = info->ndp_ofld_content[idx].ndp_en;
pcontent->chk_remote_ip =
info->ndp_ofld_content[idx].chk_remote_ip;
pcontent->num_target_ip =
info->ndp_ofld_content[idx].num_target_ip;
_os_mem_cpy(drv_priv, &(pcontent->mac_addr[0]),
&(info->ndp_ofld_content[idx].mac_addr[0]),
MAC_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(pcontent->remote_ipv6_addr[0]),
&(info->ndp_ofld_content[idx].remote_ipv6_addr[0]),
IPV6_ADDRESS_LENGTH);
_os_mem_cpy(drv_priv, &(pcontent->target_ipv6_addr[0][0]),
&(info->ndp_ofld_content[idx].target_ipv6_addr[0][0]),
IPV6_ADDRESS_LENGTH*2);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] ndp_chk_remote_ip %u\n",
pcontent->chk_remote_ip);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] ndp_num_target_ip %u\n",
pcontent->num_target_ip);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] ndp_mac_addr %02x:%02x:%02x:%02x:%02x:%02x \n",
pcontent->mac_addr[0],
pcontent->mac_addr[1],
pcontent->mac_addr[2],
pcontent->mac_addr[3],
pcontent->mac_addr[4],
pcontent->mac_addr[5]);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_,
"[wow] ndp_remote_ipv6 %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x \n",
pcontent->remote_ipv6_addr[0],
pcontent->remote_ipv6_addr[1],
pcontent->remote_ipv6_addr[2],
pcontent->remote_ipv6_addr[3],
pcontent->remote_ipv6_addr[4],
pcontent->remote_ipv6_addr[5],
pcontent->remote_ipv6_addr[6],
pcontent->remote_ipv6_addr[7],
pcontent->remote_ipv6_addr[8],
pcontent->remote_ipv6_addr[9],
pcontent->remote_ipv6_addr[10],
pcontent->remote_ipv6_addr[11],
pcontent->remote_ipv6_addr[12],
pcontent->remote_ipv6_addr[13],
pcontent->remote_ipv6_addr[14],
pcontent->remote_ipv6_addr[15]);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_,
"[wow] ndp_target_ipv6_addr %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x \n",
pcontent->target_ipv6_addr[0][0],
pcontent->target_ipv6_addr[0][1],
pcontent->target_ipv6_addr[0][2],
pcontent->target_ipv6_addr[0][3],
pcontent->target_ipv6_addr[0][4],
pcontent->target_ipv6_addr[0][5],
pcontent->target_ipv6_addr[0][6],
pcontent->target_ipv6_addr[0][7],
pcontent->target_ipv6_addr[0][8],
pcontent->target_ipv6_addr[0][9],
pcontent->target_ipv6_addr[0][10],
pcontent->target_ipv6_addr[0][11],
pcontent->target_ipv6_addr[0][12],
pcontent->target_ipv6_addr[0][13],
pcontent->target_ipv6_addr[0][14],
pcontent->target_ipv6_addr[0][15]);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_,
"[wow] ndp_target_ipv6_addr %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x \n",
pcontent->target_ipv6_addr[1][0],
pcontent->target_ipv6_addr[1][1],
pcontent->target_ipv6_addr[1][2],
pcontent->target_ipv6_addr[1][3],
pcontent->target_ipv6_addr[1][4],
pcontent->target_ipv6_addr[1][5],
pcontent->target_ipv6_addr[1][6],
pcontent->target_ipv6_addr[1][7],
pcontent->target_ipv6_addr[1][8],
pcontent->target_ipv6_addr[1][9],
pcontent->target_ipv6_addr[1][10],
pcontent->target_ipv6_addr[1][11],
pcontent->target_ipv6_addr[1][12],
pcontent->target_ipv6_addr[1][13],
pcontent->target_ipv6_addr[1][14],
pcontent->target_ipv6_addr[1][15]);
}
}
u8 _phl_query_free_cam_entry_idx(struct rtw_pattern_match_info *pattern_match_info)
{
struct rtw_wowcam_upd_info *wowcam_info = pattern_match_info->wowcam_info;
u8 i = 0;
for (i = 0; i < MAX_WOW_CAM_NUM; ++i)
if (wowcam_info[i].valid == 0)
break;
return i;
}
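/*
* Bit-serial CRC-16 step over one input byte. The feedback taps (bit 15 into
* bits 0, 5 and 12) correspond to the CCITT polynomial x^16 + x^12 + x^5 + 1,
* which appears to be what the wake-pattern matching HW expects.
*/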
u16 _phl_cal_crc16(u8 data, u16 crc)
{
u8 shift_in, data_bit;
u8 crc_bit4, crc_bit11, crc_bit15;
u16 crc_result;
int index;
for (index = 0; index < 8; index++) {
crc_bit15 = ((crc & BIT15) ? 1 : 0);
data_bit = (data & (BIT0 << index) ? 1 : 0);
shift_in = crc_bit15 ^ data_bit;
/*printf("crc_bit15=%d, DataBit=%d, shift_in=%d\n",
* crc_bit15, data_bit, shift_in);*/
crc_result = crc << 1;
if (shift_in == 0)
crc_result &= (~BIT0);
else
crc_result |= BIT0;
/*printf("CRC =%x\n",CRC_Result);*/
crc_bit11 = ((crc & BIT11) ? 1 : 0) ^ shift_in;
if (crc_bit11 == 0)
crc_result &= (~BIT12);
else
crc_result |= BIT12;
/*printf("bit12 CRC =%x\n",CRC_Result);*/
crc_bit4 = ((crc & BIT4) ? 1 : 0) ^ shift_in;
if (crc_bit4 == 0)
crc_result &= (~BIT5);
else
crc_result |= BIT5;
/* printf("bit5 CRC =%x\n",CRC_Result); */
crc = crc_result;
}
return crc;
}
u16 _phl_cal_wow_ptrn_crc(u8 *pattern, u32 length)
{
u16 crc = 0xffff;
u32 i;
for (i = 0; i < length; i++)
crc = _phl_cal_crc16(pattern[i], crc);
crc = ~crc;
return crc;
}
/*
* Get the wake-up pattern after applying the mask.
* The first 12 bytes of the pattern, i.e. DA[6] and SA[6], are not
* counted, to match the HW design.
*/
u32 _phl_get_ptrn_after_mask(struct rtw_wowcam_upd_info *wowcam_info, u8 *ptrn_after_mask)
{
u32 ptrn_len_after_mask = 0;
u32 i;
u8 da_sa_offset = 12;
for (i = da_sa_offset; i < wowcam_info->ptrn_len; i++) {
if (wowcam_info->mask[i / 8] >> (i % 8) & 0x01) {
ptrn_after_mask[ptrn_len_after_mask] = wowcam_info->ptrn[i];
ptrn_len_after_mask++;
}
}
return ptrn_len_after_mask;
}
/*
* translate mask from os to mask for hw
*
* pattern from OS uses 'ethernet frame', like this:
* | 6 | 6 | 2 | 20 | Variable | 4 |
* |--------+--------+------+-----------+------------+-----|
* | 802.3 Mac Header | IP Header | TCP Packet | FCS |
* | DA | SA | Type |
*
* BUT, packets caught by our HW are in '802.11 frame', beginning from LLC,
* | 24 or 30 | 6 | 2 | 20 | Variable | 4 |
* |-------------------+--------+------+-----------+------------+-----|
* | 802.11 MAC Header | LLC | IP Header | TCP Packet | FCS |
* | Others | Type |
*
* Therefore, we need to translate mask_from_OS to mask_to_hw.
* We should left-shift mask_from_os by 6 bits to omit 'DA',
* to make it correspond to 'LLC' of mask_to_hw.
* Our HW packet begins from LLC, mask_to_hw[5:0] is part of LLC,
* but mask_from_os[5:0] is 'SA' after left-shift.
* They just don't match, so we need to set first 5 bits to 0.
*/
void _phl_to_hw_wake_mask(struct rtw_wowcam_upd_info *wowcam_info)
{
u8 mask_hw[MAX_WOW_PATTERN_SIZE_BYTE] = {0};
u32 mask_len = _os_div_round_up(wowcam_info->ptrn_len, 8);
u32 i;
u8 sa_offset = 6;
for (i = 0; i < mask_len - 1; i++) {
mask_hw[i] = wowcam_info->mask[i] >> sa_offset;
mask_hw[i] |= (wowcam_info->mask[i + 1] & 0x3F) << 2;
}
mask_hw[i] = (wowcam_info->mask[i] >> sa_offset) & 0x3F;
mask_hw[0] &= 0xC0;
for (i = 0; i < MAX_WOW_PATTERN_SIZE_DWORD; i++) {
wowcam_info->wake_mask[i] = mask_hw[i * 4];
wowcam_info->wake_mask[i] |= (mask_hw[i * 4 + 1] << 8);
wowcam_info->wake_mask[i] |= (mask_hw[i * 4 + 2] << 16);
wowcam_info->wake_mask[i] |= (mask_hw[i * 4 + 3] << 24);
}
}
enum rtw_phl_status rtw_phl_remove_wow_ptrn_info(void *phl, u8 wowcam_id)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_pattern_match_info *pattern_match_info = &wow_info->pattern_match_info;
struct rtw_wowcam_upd_info *wowcam_info = &(pattern_match_info->wowcam_info[wowcam_id]);
if (wowcam_id < MAX_WOW_CAM_NUM) {
wowcam_info->valid = 0;
phl_status = RTW_PHL_STATUS_SUCCESS;
} else {
PHL_TRACE(COMP_PHL_WOW, _PHL_WARNING_, "[wow] %s(): Invalid wowcam id(%u), Fail.\n",
__func__, wowcam_id);
phl_status = RTW_PHL_STATUS_FAILURE;
}
return phl_status;
}
enum rtw_phl_status rtw_phl_add_wow_ptrn_info(void *phl, struct rtw_wowcam_upd_info *info, u8 *wowcam_id)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_pattern_match_info *pattern_match_info = &wow_info->pattern_match_info;
struct rtw_wowcam_upd_info *wowcam_info = NULL;
void *d = phl_to_drvpriv(phl_info);
u8 ptrn_after_mask[MAX_WOW_PATTERN_SIZE_BIT] = {0};
u32 ptrn_len_after_mask = 0;
*wowcam_id = _phl_query_free_cam_entry_idx(pattern_match_info);
if (*wowcam_id < MAX_WOW_CAM_NUM) {
wowcam_info = &(pattern_match_info->wowcam_info[*wowcam_id]);
_os_mem_set(d, wowcam_info, 0, sizeof(struct rtw_wowcam_upd_info));
_os_mem_cpy(d, wowcam_info, info, sizeof(struct rtw_wowcam_upd_info));
ptrn_len_after_mask = _phl_get_ptrn_after_mask(wowcam_info, ptrn_after_mask);
wowcam_info->match_crc = _phl_cal_wow_ptrn_crc(ptrn_after_mask, ptrn_len_after_mask);
_phl_to_hw_wake_mask(wowcam_info);
/* fill in phl */
wowcam_info->wow_cam_idx = *wowcam_id;
wowcam_info->rw = 1;
wowcam_info->is_negative_pattern_match = 0;
wowcam_info->skip_mac_hdr = 1;
wowcam_info->valid = 1;
phl_status = RTW_PHL_STATUS_SUCCESS;
} else {
PHL_TRACE(COMP_PHL_WOW, _PHL_WARNING_, "[wow] no free cam entry can be used.\n");
phl_status = RTW_PHL_STATUS_RESOURCE;
}
return phl_status;
}
enum rtw_phl_status rtw_phl_cfg_gtk_ofld_info(void *phl, struct rtw_gtk_ofld_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_gtk_ofld_info *gtk_ofld_info = &wow_info->gtk_ofld_info;
void *d = phl_to_drvpriv(phl_info);
FUNCIN();
if (info == NULL || gtk_ofld_info == NULL) {
PHL_TRACE(COMP_PHL_WOW, _PHL_WARNING_, "[wow] %s(): some ptr is NULL\n", __func__);
phl_status = RTW_PHL_STATUS_FAILURE;
} else {
_os_mem_set(d, gtk_ofld_info, 0, sizeof(struct rtw_gtk_ofld_info));
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gtk_en(%u), continue to gtk_ofld.\n", info->gtk_en);
if (info->gtk_en) {
_os_mem_cpy(d, gtk_ofld_info, info, sizeof(struct rtw_gtk_ofld_info));
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gtk_ofld_info:\n");
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - gtk_en = %u\n", gtk_ofld_info->gtk_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - tkip_en = %u\n", gtk_ofld_info->tkip_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - ieee80211w_en = %u\n", gtk_ofld_info->ieee80211w_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - pairwise_wakeup = %u\n", gtk_ofld_info->pairwise_wakeup);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - bip_sec_algo = %u\n", gtk_ofld_info->bip_sec_algo);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gtk_ofld_content:\n");
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - akmtype_byte3 = %u\n", gtk_ofld_info->akmtype_byte3);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - kck_len = %u\n", gtk_ofld_info->gtk_ofld_content.kck_len);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - kek_len = %u\n", gtk_ofld_info->gtk_ofld_content.kek_len);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - replay_cnt = 0x%x%x\n",
*((u32 *)(gtk_ofld_info->gtk_ofld_content.replay_cnt)+1),
*((u32 *)(gtk_ofld_info->gtk_ofld_content.replay_cnt)));
if(info->ieee80211w_en) {
gtk_ofld_info->hw_11w_en = true;
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - igtk_keyid = 0x%x\n",
*((u32 *)(gtk_ofld_info->gtk_ofld_content.igtk_keyid)));
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - ipn = 0x%x%x\n",
*((u32 *)(gtk_ofld_info->gtk_ofld_content.ipn)+1),
*((u32 *)(gtk_ofld_info->gtk_ofld_content.ipn)));
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - igtk_len = %u\n", gtk_ofld_info->gtk_ofld_content.igtk_len);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - psk_len = %u\n", gtk_ofld_info->gtk_ofld_content.psk_len);
}
} else {
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gtk_ofld_info:\n");
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - gtk_en = %u\n", gtk_ofld_info->gtk_en);
}
}
FUNCOUT();
return phl_status;
}
enum rtw_phl_status rtw_phl_cfg_realwow_info(void *phl, struct rtw_realwow_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_realwow_info *realwow_info = &wow_info->realwow_info;
void *d = phl_to_drvpriv(phl_info);
if (info == NULL || realwow_info == NULL) {
PHL_TRACE(COMP_PHL_WOW, _PHL_WARNING_, "[wow] %s(): some ptr is NULL\n", __func__);
phl_status = RTW_PHL_STATUS_FAILURE;
} else {
_os_mem_set(d, realwow_info, 0, sizeof(struct rtw_realwow_info));
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] realwow_en(%u), continue to realwow_ofld.\n",
info->realwow_en);
if (info->realwow_en) {
_os_mem_cpy(d, realwow_info, info, sizeof(struct rtw_realwow_info));
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] realwow_ofld_info:\n");
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - realwow_en = %u\n",
realwow_info->realwow_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - tkip_en = %u\n",
realwow_info->auto_wakeup);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - interval = %u\n",
realwow_info->realwow_ofld_content.interval);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - kapktsize = %u\n",
realwow_info->realwow_ofld_content.keep_alive_pkt_size);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - acklostlimit = %u\n",
realwow_info->realwow_ofld_content.ack_lost_limit);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - ackpatternsize = %u\n",
realwow_info->realwow_ofld_content.ack_ptrn_size);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - wakeuppatternsize = %u\n",
realwow_info->realwow_ofld_content.wakeup_ptrn_size);
} else {
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] realwow_ofld_info:\n");
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] - realwow_en = %u\n",
realwow_info->realwow_en);
}
}
return phl_status;
}
enum rtw_phl_status rtw_phl_cfg_wow_wake(void *phl, struct rtw_wow_wake_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
void *d = phl_to_drvpriv(phl_info);
struct rtw_wow_wake_info *wow_wake_info = &wow_info->wow_wake_info;
FUNCIN();
wow_wake_info->wow_en = info->wow_en;
wow_wake_info->drop_all_pkt = info->drop_all_pkt;
wow_wake_info->rx_parse_after_wake = info->rx_parse_after_wake;
wow_wake_info->pairwise_sec_algo = info->pairwise_sec_algo;
wow_wake_info->group_sec_algo = info->group_sec_algo;
wow_wake_info->pattern_match_en = info->pattern_match_en;
wow_wake_info->magic_pkt_en = info->magic_pkt_en;
wow_wake_info->hw_unicast_en = info->hw_unicast_en;
wow_wake_info->fw_unicast_en = info->fw_unicast_en;
wow_wake_info->deauth_wakeup = info->deauth_wakeup;
wow_wake_info->rekey_wakeup = info->rekey_wakeup;
wow_wake_info->eap_wakeup = info->eap_wakeup;
wow_wake_info->all_data_wakeup = info->all_data_wakeup;
_os_mem_cpy(d, &wow_wake_info->remote_wake_ctrl_info,
&info->remote_wake_ctrl_info, sizeof(struct rtw_remote_wake_ctrl_info));
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] wow_en %d\n", wow_wake_info->wow_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] drop_all_pkt %d\n", wow_wake_info->drop_all_pkt);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] rx_parse_after_wake %d\n", wow_wake_info->rx_parse_after_wake);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] pairwise_sec_algo %d\n", wow_wake_info->pairwise_sec_algo);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] group_sec_algo %d\n", wow_wake_info->group_sec_algo);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] bip_sec_algo %d\n", wow_wake_info->bip_sec_algo);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] pattern_match_en %d\n", wow_wake_info->pattern_match_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] magic_pkt_en %d\n", wow_wake_info->magic_pkt_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] hw_unicast_en %d\n", wow_wake_info->hw_unicast_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] fw_unicast_en %d\n", wow_wake_info->fw_unicast_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] deauth_wakeup %d\n", wow_wake_info->deauth_wakeup);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] rekey_wakeup %d\n", wow_wake_info->rekey_wakeup);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] eap_wakeup %d\n", wow_wake_info->eap_wakeup);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] all_data_wakeup %d\n", wow_wake_info->all_data_wakeup);
return phl_status;
}
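/*
 * Store the dev-to-host GPIO wake pulse settings; they are handed to HAL via
 * the wow_gpio member of struct rtw_hal_wow_cfg when the WoW function is
 * enabled.
 */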
enum rtw_phl_status rtw_phl_cfg_gpio_wake_pulse(void *phl, struct rtw_wow_gpio_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_wow_gpio_info *wow_gpio = &wow_info->wow_gpio;
FUNCIN();
wow_gpio->dev2hst_gpio_en = info->dev2hst_gpio_en;
wow_gpio->disable_inband = info->disable_inband;
wow_gpio->gpio_output_input = info->gpio_output_input;
wow_gpio->gpio_active = info->gpio_active;
wow_gpio->toggle_pulse = info->toggle_pulse;
wow_gpio->data_pin_wakeup = info->data_pin_wakeup;
wow_gpio->gpio_pulse_nonstop = info->gpio_pulse_nonstop;
wow_gpio->gpio_time_unit = info->gpio_time_unit;
wow_gpio->gpio_num = info->gpio_num;
wow_gpio->gpio_pulse_dura = info->gpio_pulse_dura;
wow_gpio->gpio_pulse_period = info->gpio_pulse_period;
wow_gpio->gpio_pulse_count = info->gpio_pulse_count;
wow_gpio->customer_id = info->customer_id;
wow_gpio->gpio_pulse_en_a = info->gpio_pulse_en_a;
wow_gpio->gpio_duration_unit_a = info->gpio_duration_unit_a;
wow_gpio->gpio_pulse_nonstop_a = info->gpio_pulse_nonstop_a;
wow_gpio->special_reason_a = info->special_reason_a;
wow_gpio->gpio_duration_a = info->gpio_duration_a;
wow_gpio->gpio_pulse_count_a = info->gpio_pulse_count_a;
wow_gpio->dev2hst_gpio_mode = info->dev2hst_gpio_mode;
wow_gpio->dev2hst_gpio = info->dev2hst_gpio;
wow_gpio->dev2hst_high = info->dev2hst_high;
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] dev2hst_gpio_en %d\n", wow_gpio->dev2hst_gpio_en);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] disable_inband %d\n", wow_gpio->disable_inband);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_output_input %d\n", wow_gpio->gpio_output_input);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_active %d\n", wow_gpio->gpio_active);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] toggle_pulse %d\n", wow_gpio->toggle_pulse);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] data_pin_wakeup %d\n", wow_gpio->data_pin_wakeup);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_pulse_nonstop %d\n", wow_gpio->gpio_pulse_nonstop);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_time_unit %d\n", wow_gpio->gpio_time_unit);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_num %d\n", wow_gpio->gpio_num);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_pulse_dura %d\n", wow_gpio->gpio_pulse_dura);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_pulse_period %d\n", wow_gpio->gpio_pulse_period);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_pulse_count %d\n", wow_gpio->gpio_pulse_count);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] customer_id %d\n", wow_gpio->customer_id);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_pulse_en_a %d\n", wow_gpio->gpio_pulse_en_a);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_duration_unit_a %d\n", wow_gpio->gpio_duration_unit_a);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_pulse_nonstop_a %d\n", wow_gpio->gpio_pulse_nonstop_a);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] special_reason_a %d\n", wow_gpio->special_reason_a);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_duration_a %d\n", wow_gpio->gpio_duration_a);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] gpio_pulse_count_a %d\n", wow_gpio->gpio_pulse_count_a);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] dev2hst_gpio_mode %d\n", wow_gpio->dev2hst_gpio_mode);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] dev2hst_gpio %d\n", wow_gpio->dev2hst_gpio);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] dev2hst_high %d\n", wow_gpio->dev2hst_high);
return phl_status;
}
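/*
 * Snapshot the WoW configuration (init) and the exit state (deinit) into
 * wow_info->wow_stat so it can be inspected after resume, and count AOAC
 * report failures.
 */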
void phl_record_wow_stat(struct phl_wow_info *wow_info)
{
struct phl_wow_stat *wow_stat = &wow_info->wow_stat;
/* init */
wow_stat->func_en = wow_info->func_en;
wow_stat->op_mode = wow_info->op_mode;
wow_stat->keep_alive_en = wow_info->keep_alive_info.keep_alive_en;
wow_stat->disc_det_en = wow_info->disc_det_info.disc_det_en;
wow_stat->arp_en = wow_info->arp_ofld_info.arp_en;
wow_stat->ndp_en = wow_info->ndp_ofld_info.ndp_en;
wow_stat->gtk_en = wow_info->gtk_ofld_info.gtk_en;
wow_stat->dot11w_en = wow_info->gtk_ofld_info.ieee80211w_en;
wow_stat->err.init = wow_info->err.init;
/* deinit */
wow_stat->mac_pwr = wow_info->mac_pwr;
wow_stat->wake_rsn = wow_info->wake_rsn;
wow_stat->err.deinit = wow_info->err.deinit;
if (wow_info->aoac_info.rpt_fail)
++wow_stat->aoac_rpt_fail_cnt;
}
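/*
 * Bus specific pre/post configuration for entering WoW. On PCI the Tx DMA is
 * stopped, HW Tx is dropped and the DMA engine is polled idle, then Tx/Rx HCI
 * and DMA IO are disabled; the USB and SDIO variants currently do nothing.
 */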
#ifdef CONFIG_PCI_HCI
enum rtw_phl_status _init_precfg(struct phl_info_t *phl_info, u8 band)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
u8 status = false;
do {
/* 1. stop Tx DMA */
rtw_hal_wow_cfg_txdma(phl_info->hal, false);
/* 2. stop HW Tx */
hstatus = rtw_hal_wow_drop_tx(phl_info->hal, band);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
PHL_ERR("[wow] rtw_hal_wow_drop_tx fail!\n");
break;
}
/* 3. poll dma idle */
status = rtw_hal_poll_txdma_idle(phl_info->hal);
if (!status) {
PHL_ERR("[wow] rtw_hal_poll_txdma_idle fail!\n");
break;
}
} while (0);
if (RTW_HAL_STATUS_SUCCESS != hstatus || !status)
pstatus = RTW_PHL_STATUS_FAILURE;
else
pstatus = RTW_PHL_STATUS_SUCCESS;
FUNCOUT_WSTS(pstatus);
return pstatus;
}
enum rtw_phl_status _init_postcfg(struct phl_info_t *phl_info)
{
/* stop tx/rx hci */
rtw_hal_cfg_txhci(phl_info->hal, false);
rtw_hal_cfg_rxhci(phl_info->hal, false);
/* stop dma io */
rtw_hal_cfg_dma_io(phl_info->hal, false);
return RTW_PHL_STATUS_SUCCESS;
}
#elif defined(CONFIG_USB_HCI)
enum rtw_phl_status _init_precfg(struct phl_info_t *phl_info, u8 band)
{
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status _init_postcfg(struct phl_info_t *phl_info)
{
return RTW_PHL_STATUS_SUCCESS;
}
#elif defined(CONFIG_SDIO_HCI)
enum rtw_phl_status _init_precfg(struct phl_info_t *phl_info, u8 band)
{
return RTW_PHL_STATUS_SUCCESS;
}
enum rtw_phl_status _init_postcfg(struct phl_info_t *phl_info)
{
return RTW_PHL_STATUS_SUCCESS;
}
#endif
static enum rtw_phl_status _init_precfg_set_rxfltr(struct phl_info_t *phl_info)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
do {
hstatus = rtw_hal_set_rxfltr_by_type(phl_info->hal, 0, RTW_PHL_PKT_TYPE_DATA, 0);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
PHL_ERR("[wow] set rx filter data drop fail, status(%u)\n", hstatus);
break;
}
hstatus = rtw_hal_set_rxfltr_by_type(phl_info->hal, 0, RTW_PHL_PKT_TYPE_MGNT, 0);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
PHL_ERR("[wow] set rx filter mgnt drop fail, status(%u)\n", hstatus);
break;
}
hstatus = rtw_hal_set_rxfltr_by_type(phl_info->hal, 0, RTW_PHL_PKT_TYPE_CTRL, 0);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
PHL_ERR("[wow] set rx filter ctrl drop fail, status(%u)\n", hstatus);
break;
}
} while (0);
return (hstatus == RTW_HAL_STATUS_SUCCESS) ?
RTW_PHL_STATUS_SUCCESS : RTW_PHL_STATUS_FAILURE;
}
static enum rtw_phl_status _init_postcfg_set_rxfltr(struct phl_info_t *phl_info)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_SUCCESS;
do {
hstatus = rtw_hal_set_rxfltr_by_type(phl_info->hal, 0, RTW_PHL_PKT_TYPE_DATA, 1);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
PHL_ERR("[wow] set rx filter data to host fail, status(%u)\n", hstatus);
break;
}
hstatus = rtw_hal_set_rxfltr_by_type(phl_info->hal, 0, RTW_PHL_PKT_TYPE_MGNT, 1);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
PHL_ERR("[wow] set rx filter mgnt to host fail, status(%u)\n", hstatus);
break;
}
hstatus = rtw_hal_set_rxfltr_by_type(phl_info->hal, 0, RTW_PHL_PKT_TYPE_CTRL, 1);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
PHL_ERR("[wow] set rx filter ctrl to host fail, status(%u)\n", hstatus);
break;
}
} while (0);
return (hstatus == RTW_HAL_STATUS_SUCCESS) ?
RTW_PHL_STATUS_SUCCESS : RTW_PHL_STATUS_FAILURE;
}
#define MAX_POLLING_TRX_STOP 2000 /* us */
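/*
 * Entering WoW, step 1: pause software Tx and wait (up to
 * MAX_POLLING_TRX_STOP us) for it to drain, run the bus specific pre-config,
 * drop all Rx through the Rx filters and disable PPDU status reporting.
 */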
enum rtw_phl_status phl_wow_init_precfg(struct phl_wow_info *wow_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = wow_info->phl_info;
struct phl_hci_trx_ops *trx_ops = phl_info->hci_trx_ops;
u32 wait_cnt = 0;
FUNCIN();
/* pause sw Tx */
trx_ops->req_tx_stop(phl_info);
/* schedule current existing tx handler */
pstatus = rtw_phl_tx_req_notify(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
PHL_ERR("[wow] rtw_phl_tx_req_notify fail, status(%u)\n", pstatus);
/* polling pause sw Tx done */
while (wait_cnt < MAX_POLLING_TRX_STOP) {
if (trx_ops->is_tx_pause(phl_info)) {
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] sw tx pause succeed.\n");
break;
}
_os_delay_us(phl_info->phl_com->drv_priv, 1);
wait_cnt++;
}
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] stop sw tx wait_cnt %d.\n", wait_cnt);
/* init pre-configuration for different interfaces */
pstatus = _init_precfg(phl_info, wow_info->sta->wrole->hw_band);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
return pstatus;
/* set packet drop by setting rx filter */
pstatus = _init_precfg_set_rxfltr(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
return pstatus;
/* disable ppdu sts */
rtw_hal_ppdu_sts_cfg(phl_info->hal, wow_info->sta->wrole->hw_band, false);
pstatus = RTW_PHL_STATUS_SUCCESS;
return pstatus;
}
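/*
 * Entering WoW, step 2: mask interrupts, run the bus specific post-config,
 * pause software Rx, switch the MAC into WoW sleep, restore the Rx-to-host
 * filters and finally stop or reset the Trx path depending on the interface.
 */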
enum rtw_phl_status phl_wow_init_postcfg(struct phl_wow_info *wow_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
struct phl_info_t *phl_info = wow_info->phl_info;
struct phl_hci_trx_ops *trx_ops = phl_info->hci_trx_ops;
u32 wait_cnt = 0;
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *evt_ops = &phl_info->phl_com->evt_ops;
#endif /* CONFIG_SYNC_INTERRUPT */
/* disable interrupt */
#ifdef CONFIG_SYNC_INTERRUPT
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), false);
#else
rtw_hal_disable_interrupt(phl_info->phl_com, phl_info->hal);
#endif /* CONFIG_SYNC_INTERRUPT */
pstatus = _init_postcfg(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
PHL_ERR("[wow] _init_postcfg failed.\n");
/* stop sw rx */
trx_ops->req_rx_stop(phl_info);
pstatus = rtw_phl_start_rx_process(phl_info);
if (RTW_PHL_STATUS_SUCCESS != pstatus)
PHL_ERR("[wow] rtw_phl_start_rx_process failed.\n");
while (wait_cnt < MAX_POLLING_TRX_STOP) {
if (trx_ops->is_rx_pause(phl_info)) {
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] sw rx pause succeed.\n");
break;
}
_os_delay_us(phl_info->phl_com->drv_priv, 1);
wait_cnt++;
}
if (wait_cnt == MAX_POLLING_TRX_STOP)
PHL_WARN("[wow] sw rx pause fail.\n");
/* configure wow sleep */
hstatus = rtw_hal_cfg_wow_sleep(phl_info->hal, true);
if (RTW_HAL_STATUS_SUCCESS != hstatus)
return RTW_PHL_STATUS_FAILURE;
/* forward rx packet to host by setting rx filter */
pstatus = _init_postcfg_set_rxfltr(phl_info);
/* reset trx */
#ifdef CONFIG_USB_HCI
trx_ops->trx_stop(phl_info);
#else
trx_ops->trx_reset(phl_info, PHL_CTRL_TX|PHL_CTRL_RX);
#endif
return pstatus;
}
static void _phl_indic_wake_sec_upd(struct phl_wow_info *wow_info, u8 aoac_report_get_ok, u8 rx_ready)
{
struct phl_info_t *phl_info = wow_info->phl_info;
struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
void *drv_priv = phl_to_drvpriv(phl_info);
if (NULL != ops->wow_handle_sec_info_update)
ops->wow_handle_sec_info_update(drv_priv, &wow_info->aoac_info, aoac_report_get_ok, rx_ready);
else
PHL_TRACE(COMP_PHL_WOW, _PHL_ERR_, "[wow] %s : evt_ops->wow_handle_sec_info_update is NULL.\n"
, __func__);
}
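/*
 * Fetch the AOAC (rekey) report in two phases when a pairwise security
 * algorithm is in use: phase 0 (rx_ready == false) right after leaving WoW
 * sleep and phase 1 (rx_ready == true) once software Rx has been resumed.
 * Each phase forwards its result to the core layer through
 * wow_handle_sec_info_update().
 */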
static void _phl_handle_aoac_rpt_action(struct phl_wow_info *wow_info, bool rx_ready)
{
struct phl_info_t *phl_info = wow_info->phl_info;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
u8 aoac_report_get_ok = false;
static u8 phase_0_ok = false;
if (wow_info->wow_wake_info.pairwise_sec_algo) {
if (rx_ready == false) {
/* phase 0 */
hstatus = rtw_hal_get_wow_aoac_rpt(phl_info->hal, &wow_info->aoac_info, rx_ready);
aoac_report_get_ok = (hstatus == RTW_HAL_STATUS_SUCCESS) ? true : false;
_phl_indic_wake_sec_upd(wow_info, aoac_report_get_ok, rx_ready);
phase_0_ok = aoac_report_get_ok;
}
if (rx_ready == true) {
/* phase 1 */
if (phase_0_ok) {
hstatus = rtw_hal_get_wow_aoac_rpt(phl_info->hal, &wow_info->aoac_info, rx_ready);
aoac_report_get_ok = (hstatus == RTW_HAL_STATUS_SUCCESS) ? true : false;
_phl_indic_wake_sec_upd(wow_info, aoac_report_get_ok, rx_ready);
}
phase_0_ok = false;
wow_info->aoac_info.rpt_fail = (aoac_report_get_ok == false) ? true : false;
}
}
}
static enum rtw_phl_status _phl_indic_wake_rsn(struct phl_wow_info *wow_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = wow_info->phl_info;
struct rtw_phl_evt_ops *evt_ops = &(phl_info->phl_com->evt_ops);
FUNCIN_WSTS(phl_status);
if (NULL != evt_ops->indicate_wake_rsn) {
evt_ops->indicate_wake_rsn(phl_to_drvpriv(phl_info), wow_info->wake_rsn);
}
FUNCOUT_WSTS(phl_status);
return phl_status;
}
void phl_wow_handle_wake_rsn(struct phl_wow_info *wow_info, u8 *reset)
{
struct phl_info_t *phl_info = wow_info->phl_info;
rtw_hal_get_wake_rsn(phl_info->hal, &wow_info->wake_rsn, reset);
_phl_indic_wake_rsn(wow_info);
}
#ifdef CONFIG_PCI_HCI
enum rtw_phl_status _deinit_precfg(struct phl_info_t *phl_info)
{
#ifdef DBG_RST_BDRAM_TIME
u32 rst_bdram_start = _os_get_cur_time_ms();
#endif /* DBG_RST_BDRAM_TIME */
rtw_hal_clear_bdidx(phl_info->hal);
#ifdef DBG_RST_BDRAM_TIME
rst_bdram_start = _os_get_cur_time_ms();
#endif
rtw_hal_rst_bdram(phl_info->hal);
#ifdef DBG_RST_BDRAM_TIME
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s : Reset bdram takes %u (ms).\n"
, __func__, phl_get_passing_time_ms(rst_bdram_start));
#endif
rtw_hal_cfg_dma_io(phl_info->hal, true);
rtw_hal_cfg_txhci(phl_info->hal, true);
rtw_hal_cfg_rxhci(phl_info->hal, true);
/* start tx dma */
rtw_hal_wow_cfg_txdma(phl_info->hal, true);
return RTW_PHL_STATUS_SUCCESS;
}
#elif defined(CONFIG_USB_HCI)
enum rtw_phl_status _deinit_precfg(struct phl_info_t *phl_info)
{
return RTW_PHL_STATUS_SUCCESS;
}
#elif defined(CONFIG_SDIO_HCI)
enum rtw_phl_status _deinit_precfg(struct phl_info_t *phl_info)
{
return RTW_PHL_STATUS_SUCCESS;
}
#endif
void _deinit_precfg_set_intr(struct phl_info_t *phl_info)
{
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *evt_ops = &phl_info->phl_com->evt_ops;
#endif /* CONFIG_SYNC_INTERRUPT */
rtw_hal_set_default_var(phl_info->hal, SET_DEF_RSN_WOW_RESUME_HNDL_RX);
#ifdef CONFIG_SYNC_INTERRUPT
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), true);
#else
rtw_hal_enable_interrupt(phl_info->phl_com, phl_info->hal);
#endif /* CONFIG_SYNC_INTERRUPT */
}
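/*
 * Leaving WoW, step 1: re-enable the bus, take the MAC out of WoW sleep,
 * fetch the phase-0 AOAC report, resume software Rx, restore the default
 * interrupt settings and then fetch the phase-1 AOAC report.
 */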
enum rtw_phl_status phl_wow_deinit_precfg(struct phl_wow_info *wow_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = wow_info->phl_info;
struct phl_hci_trx_ops *trx_ops = phl_info->hci_trx_ops;
FUNCIN();
_deinit_precfg(phl_info);
rtw_hal_cfg_wow_sleep(phl_info->hal, false);
_phl_handle_aoac_rpt_action(wow_info, false);
/* resume sw rx */
#ifdef CONFIG_USB_HCI
trx_ops->trx_cfg(phl_info);
#else
trx_ops->trx_resume(phl_info, PHL_CTRL_RX);
#endif
_deinit_precfg_set_intr(phl_info);
_phl_handle_aoac_rpt_action(wow_info, true);
return phl_status;
}
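/*
 * Reset every per-session WoW setting back to its default once the WoW
 * session has been torn down; pattern_match_info is intentionally left alone
 * (see the comment at the end of the function).
 */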
void phl_reset_wow_info(struct phl_wow_info *wow_info)
{
struct phl_info_t *phl_info = wow_info->phl_info;
void *d = phl_to_drvpriv(phl_info);
wow_info->func_en = 0;
wow_info->op_mode = RTW_WOW_OP_NONE;
wow_info->mac_pwr = RTW_MAC_PWR_NONE;
wow_info->ps_pwr_lvl = PS_PWR_LVL_MAX;
_os_mem_set(d, &wow_info->err, 0, sizeof(struct phl_wow_error));
_os_mem_set(d, &wow_info->keep_alive_info, 0, sizeof(struct rtw_keep_alive_info));
_os_mem_set(d, &wow_info->disc_det_info, 0, sizeof(struct rtw_disc_det_info));
_os_mem_set(d, &wow_info->nlo_info, 0, sizeof(struct rtw_nlo_info));
_os_mem_set(d, &wow_info->arp_ofld_info, 0, sizeof(struct rtw_arp_ofld_info));
_os_mem_set(d, &wow_info->ndp_ofld_info, 0, sizeof(struct rtw_ndp_ofld_info));
_os_mem_set(d, &wow_info->gtk_ofld_info, 0, sizeof(struct rtw_gtk_ofld_info));
_os_mem_set(d, &wow_info->realwow_info, 0, sizeof(struct rtw_realwow_info));
_os_mem_set(d, &wow_info->wow_wake_info, 0, sizeof(struct rtw_wow_wake_info));
_os_mem_set(d, &wow_info->aoac_info, 0, sizeof(struct rtw_aoac_report));
wow_info->wake_rsn = RTW_WOW_RSN_UNKNOWN;
/*
 * wow_info->pattern_match_info does not need to be reset here;
 * we expect those entries to be removed by actions triggered from the core layer.
 */
}
void _deinit_postcfg_set_intr(struct phl_info_t *phl_info)
{
#ifdef CONFIG_SYNC_INTERRUPT
struct rtw_phl_evt_ops *evt_ops = &phl_info->phl_com->evt_ops;
#endif /* CONFIG_SYNC_INTERRUPT */
#ifdef CONFIG_SYNC_INTERRUPT
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), false);
#else
rtw_hal_disable_interrupt(phl_info->phl_com, phl_info->hal);
#endif /* CONFIG_SYNC_INTERRUPT */
rtw_hal_set_default_var(phl_info->hal, SET_DEF_RSN_WOW_RESUME_DONE);
#ifdef CONFIG_SYNC_INTERRUPT
evt_ops->set_interrupt_caps(phl_to_drvpriv(phl_info), true);
#else
rtw_hal_enable_interrupt(phl_info->phl_com, phl_info->hal);
#endif /* CONFIG_SYNC_INTERRUPT */
}
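/*
 * Leaving WoW, step 2: resume software Tx, re-enable PPDU status reporting
 * and restore the default interrupt variables.
 */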
enum rtw_phl_status phl_wow_deinit_postcfg(struct phl_wow_info *wow_info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = wow_info->phl_info;
struct phl_hci_trx_ops *trx_ops = phl_info->hci_trx_ops;
FUNCIN();
/* resume sw tx */
trx_ops->trx_resume(phl_info, PHL_CTRL_TX);
/* enable ppdu sts */
rtw_hal_ppdu_sts_cfg(phl_info->hal, wow_info->sta->wrole->hw_band, true);
_deinit_postcfg_set_intr(phl_info);
return phl_status;
}
u8 phl_wow_nlo_exist(struct phl_wow_info *wow_info)
{
return wow_info->nlo_info.nlo_exist;
}
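/*
 * Request a packet offload entry of the given type for the WoW station,
 * remember the returned token in wow_info and look up the firmware packet id
 * that is later referenced when the WoW function is enabled.
 */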
enum rtw_phl_status _phl_wow_cfg_pkt_ofld(struct phl_wow_info *wow_info, u8 pkt_type, u8 *pkt_id, void *buf)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
u16 macid = wow_info->sta->macid;
u32 *token;
switch(pkt_type) {
case PKT_TYPE_NULL_DATA:
token = &wow_info->null_pkt_token;
break;
case PKT_TYPE_ARP_RSP:
token = &wow_info->arp_pkt_token;
break;
case PKT_TYPE_NDP:
token = &wow_info->ndp_pkt_token;
break;
case PKT_TYPE_EAPOL_KEY:
token = &wow_info->eapol_key_pkt_token;
break;
case PKT_TYPE_SA_QUERY:
token = &wow_info->sa_query_pkt_token;
break;
case PKT_TYPE_REALWOW_KAPKT:
token = &wow_info->kapkt_pkt_token;
break;
case PKT_TYPE_REALWOW_ACK:
token = &wow_info->ack_pkt_token;
break;
case PKT_TYPE_REALWOW_WP:
token = &wow_info->wp_token;
break;
default:
PHL_TRACE(COMP_PHL_WOW, _PHL_ERR_, "[wow] %s : unknown pkt_type %d.\n"
, __func__, pkt_type);
return pstatus;
}
pstatus = RTW_PHL_PKT_OFLD_REQ(wow_info->phl_info, macid, pkt_type, token, buf);
if (pstatus == RTW_PHL_STATUS_SUCCESS)
*pkt_id = phl_pkt_ofld_get_id(wow_info->phl_info, macid, pkt_type);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s : pkt_type %s, pkt_id %d, token %u, status(%u)\n",
__func__, phl_get_pkt_ofld_str(pkt_type), *pkt_id, *token, pstatus);
return pstatus;
}
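/*
 * Download every enabled offload template (null data, ARP response, NA,
 * EAPOL key, SA query and the RealWoW frames), assemble struct
 * rtw_hal_wow_cfg and enable then start the WoW function in HAL/firmware.
 */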
enum rtw_phl_status phl_wow_func_en(struct phl_wow_info *wow_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
struct phl_info_t *phl_info = wow_info->phl_info;
struct rtw_phl_stainfo_t *sta = wow_info->sta;
struct rtw_pkt_ofld_null_info null_info = {0};
struct rtw_pkt_ofld_arp_rsp_info arp_rsp_info = {0};
struct rtw_pkt_ofld_na_info na_info = {0};
struct rtw_pkt_ofld_eapol_key_info eapol_key_info = {0};
struct rtw_pkt_ofld_sa_query_info sa_query_info = {0};
struct rtw_pkt_ofld_realwow_kapkt_info kapkt_info = {0};
struct rtw_pkt_ofld_realwow_ack_info ack_info = {0};
struct rtw_pkt_ofld_realwow_wp_info wakeup_info = {0};
struct rtw_hal_wow_cfg cfg;
FUNCIN();
if (!wow_info->wow_wake_info.wow_en) {
PHL_WARN("%s : wow func is not enabled!\n", __func__);
return pstatus;
}
do {
hstatus = rtw_hal_reset_pkt_ofld_state(phl_info->hal);
if (RTW_HAL_STATUS_SUCCESS != hstatus) {
pstatus = RTW_PHL_STATUS_FAILURE;
break;
}
if (wow_info->keep_alive_info.keep_alive_en) {
_phl_cfg_pkt_ofld_null_info(wow_info, sta, &null_info);
pstatus = _phl_wow_cfg_pkt_ofld(wow_info,
PKT_TYPE_NULL_DATA,
&wow_info->keep_alive_info.null_pkt_id,
(void *)&null_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
}
if (wow_info->arp_ofld_info.arp_en) {
_phl_cfg_pkt_ofld_arp_rsp_info(wow_info, sta, &arp_rsp_info);
pstatus = _phl_wow_cfg_pkt_ofld(wow_info,
PKT_TYPE_ARP_RSP,
&wow_info->arp_ofld_info.arp_rsp_id,
(void *)&arp_rsp_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
}
if (wow_info->ndp_ofld_info.ndp_en) {
_phl_cfg_pkt_ofld_na_info(wow_info, sta, &na_info);
pstatus = _phl_wow_cfg_pkt_ofld(wow_info,
PKT_TYPE_NDP, &wow_info->ndp_ofld_info.ndp_id,
(void *)&na_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
}
if (wow_info->gtk_ofld_info.gtk_en) {
_phl_cfg_pkt_ofld_eapol_key_info(wow_info, sta, &eapol_key_info);
pstatus = _phl_wow_cfg_pkt_ofld(wow_info,
PKT_TYPE_EAPOL_KEY, &wow_info->gtk_ofld_info.gtk_rsp_id,
(void *)&eapol_key_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
if (wow_info->gtk_ofld_info.ieee80211w_en) {
_phl_cfg_pkt_ofld_sa_query_info(wow_info, sta, &sa_query_info);
pstatus = _phl_wow_cfg_pkt_ofld(wow_info,
PKT_TYPE_SA_QUERY, &wow_info->gtk_ofld_info.sa_query_id,
(void *)&sa_query_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
}
}
if (wow_info->realwow_info.realwow_en) {
/* realwow keep alive */
_phl_cfg_pkt_ofld_realwow_kapkt_info(wow_info, sta, &kapkt_info);
pstatus = _phl_wow_cfg_pkt_ofld(wow_info,
PKT_TYPE_REALWOW_KAPKT,
&wow_info->realwow_info.keepalive_id,
(void *)&kapkt_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
/* realwow ack */
_phl_cfg_pkt_ofld_realwow_ack_info(wow_info, &ack_info);
pstatus = _phl_wow_cfg_pkt_ofld(wow_info,
PKT_TYPE_REALWOW_ACK,
&wow_info->realwow_info.ack_pattern_id,
(void *)&ack_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
/* realwow wake up */
_phl_cfg_pkt_ofld_realwow_wp_info(wow_info, &wakeup_info);
pstatus = _phl_wow_cfg_pkt_ofld(wow_info,
PKT_TYPE_REALWOW_WP,
&wow_info->realwow_info.wakeup_pattern_id,
(void *)&wakeup_info);
if (pstatus != RTW_PHL_STATUS_SUCCESS)
break;
}
cfg.keep_alive_cfg = &wow_info->keep_alive_info;
cfg.disc_det_cfg = &wow_info->disc_det_info;
cfg.nlo_cfg = &wow_info->nlo_info;
cfg.arp_ofld_cfg = &wow_info->arp_ofld_info;
cfg.ndp_ofld_cfg = &wow_info->ndp_ofld_info;
cfg.gtk_ofld_cfg = &wow_info->gtk_ofld_info;
cfg.realwow_cfg = &wow_info->realwow_info;
cfg.wow_wake_cfg = &wow_info->wow_wake_info;
cfg.pattern_match_info = &wow_info->pattern_match_info;
cfg.wow_gpio = &wow_info->wow_gpio;
hstatus = rtw_hal_wow_func_en(phl_info->phl_com, phl_info->hal, sta->macid, &cfg);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("rtw_hal_wow_func_en fail, status (%u)\n", hstatus);
pstatus = RTW_PHL_STATUS_FAILURE;
break;
}
hstatus = rtw_hal_wow_func_start(phl_info->phl_com, phl_info->hal, sta->macid, &cfg);
if (hstatus != RTW_HAL_STATUS_SUCCESS) {
PHL_ERR("rtw_hal_wow_func_start fail, status (%u)\n", hstatus);
pstatus = RTW_PHL_STATUS_FAILURE;
break;
}
wow_info->func_en = true;
pstatus = RTW_PHL_STATUS_SUCCESS;
} while (0);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s status (%u).\n", __func__, pstatus);
return pstatus;
}
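/*
 * Counterpart of phl_wow_func_en(): disable the WoW function in HAL, cancel
 * every packet offload entry that was requested and stop the WoW firmware
 * function.
 */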
void phl_wow_func_dis(struct phl_wow_info *wow_info)
{
enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
struct phl_info_t *phl_info = wow_info->phl_info;
struct rtw_phl_stainfo_t *sta = wow_info->sta;
if (!wow_info->wow_wake_info.wow_en) {
PHL_WARN("%s : wow func is not enabled!\n", __func__);
return;
}
hstatus = rtw_hal_wow_func_dis(phl_info->phl_com, phl_info->hal, sta->macid);
if (hstatus != RTW_HAL_STATUS_SUCCESS)
PHL_ERR("rtw_hal_wow_func_dis fail, status (%u)\n", hstatus);
if (wow_info->keep_alive_info.keep_alive_en) {
phl_pkt_ofld_cancel(phl_info, sta->macid,
PKT_TYPE_NULL_DATA, &wow_info->null_pkt_token);
}
if (wow_info->arp_ofld_info.arp_en) {
phl_pkt_ofld_cancel(phl_info, sta->macid,
PKT_TYPE_ARP_RSP, &wow_info->arp_pkt_token);
}
if (wow_info->ndp_ofld_info.ndp_en) {
phl_pkt_ofld_cancel(phl_info, sta->macid,
PKT_TYPE_NDP, &wow_info->ndp_pkt_token);
}
if (wow_info->gtk_ofld_info.gtk_en) {
phl_pkt_ofld_cancel(phl_info, sta->macid,
PKT_TYPE_EAPOL_KEY, &wow_info->eapol_key_pkt_token);
if (wow_info->gtk_ofld_info.ieee80211w_en) {
phl_pkt_ofld_cancel(phl_info, sta->macid,
PKT_TYPE_SA_QUERY, &wow_info->sa_query_pkt_token);
}
}
if (wow_info->realwow_info.realwow_en) {
phl_pkt_ofld_cancel(phl_info, sta->macid,
PKT_TYPE_REALWOW_KAPKT, &wow_info->kapkt_pkt_token);
phl_pkt_ofld_cancel(phl_info, sta->macid,
PKT_TYPE_REALWOW_ACK, &wow_info->ack_pkt_token);
phl_pkt_ofld_cancel(phl_info, sta->macid,
PKT_TYPE_REALWOW_WP, &wow_info->wp_token);
}
hstatus = rtw_hal_wow_func_stop(phl_info->phl_com, phl_info->hal, sta->macid);
if (hstatus != RTW_HAL_STATUS_SUCCESS)
PHL_ERR("rtw_hal_wow_func_stop fail, status (%u)\n", hstatus);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s done.\n", __func__);
}
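/*
 * Choose the WoW operation mode from the MLME state: linked ->
 * RTW_WOW_OP_CONNECT_STBY, no link with a pending NLO request ->
 * RTW_WOW_OP_DISCONNECT_STBY, otherwise power the device down.
 */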
void phl_wow_decide_op_mode(struct phl_wow_info *wow_info, struct rtw_phl_stainfo_t *sta)
{
u8 nlo_exist = phl_wow_nlo_exist(wow_info);
enum mlme_state mstat = sta->wrole->mstate;
wow_info->sta = sta;
if (mstat == MLME_NO_LINK && !nlo_exist) {
wow_info->op_mode = RTW_WOW_OP_PWR_DOWN;
} else if (mstat == MLME_NO_LINK && nlo_exist) {
wow_info->op_mode = RTW_WOW_OP_DISCONNECT_STBY;
} else if (mstat == MLME_LINKED) {
wow_info->op_mode = RTW_WOW_OP_CONNECT_STBY;
} else {
wow_info->op_mode = RTW_WOW_OP_PWR_DOWN;
}
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s op mode set to %d\n.",
__func__, wow_info->op_mode);
}
#ifdef CONFIG_POWER_SAVE
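/*
 * Power-save handling while in WoW: only the LPS path for
 * RTW_WOW_OP_CONNECT_STBY is implemented here, the IPS branches for
 * RTW_WOW_OP_DISCONNECT_STBY are placeholders.
 */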
void phl_wow_ps_pctl_cfg(struct phl_wow_info *wow_info, u8 enter_wow)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = wow_info->phl_info;
struct ps_cfg cfg = {0};
struct rtw_ps_cap_t *ps_cap = _get_ps_cap(phl_info);
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s : op mode %d.\n.",
__func__, wow_info->op_mode);
if (wow_info->op_mode == RTW_WOW_OP_DISCONNECT_STBY) {
/* IPS */
} else if (wow_info->op_mode == RTW_WOW_OP_CONNECT_STBY) {
/* LPS */
if (ps_cap->lps_wow_en) {
cfg.macid = wow_info->sta->macid;
cfg.awake_interval = ps_cap->lps_wow_awake_interval;
cfg.listen_bcn_mode = ps_cap->lps_wow_listen_bcn_mode;
cfg.smart_ps_mode = ps_cap->lps_wow_smart_ps_mode;
pstatus = phl_ps_lps_cfg(phl_info, &cfg, enter_wow);
}
} else {
PHL_ERR("%s : undefined wowlan op mode.\n", __func__);
}
}
void phl_wow_ps_pwr_cfg(struct phl_wow_info *wow_info, u8 enter_wow)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = wow_info->phl_info;
struct rtw_ps_cap_t *ps_cap = _get_ps_cap(phl_info);
wow_info->ps_pwr_lvl = PS_PWR_LVL_PWRON;
if (wow_info->op_mode == RTW_WOW_OP_DISCONNECT_STBY) {
/* IPS */
} else if (wow_info->op_mode == RTW_WOW_OP_CONNECT_STBY) {
if (ps_cap->lps_wow_en) {
wow_info->ps_pwr_lvl = phl_ps_judge_pwr_lvl(ps_cap->lps_wow_cap, PS_MODE_LPS, enter_wow);
pstatus = phl_ps_cfg_pwr_lvl(phl_info, PS_MODE_LPS, PS_PWR_LVL_PWRON, wow_info->ps_pwr_lvl);
}
} else {
PHL_ERR("%s : undefined wowlan op mode.\n", __func__);
}
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s : op mode %d, pwr lvl %s.\n.",
__func__, wow_info->op_mode, phl_ps_pwr_lvl_to_str(wow_info->ps_pwr_lvl));
}
enum rtw_phl_status phl_wow_leave_low_power(struct phl_wow_info *wow_info)
{
enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
struct phl_info_t *phl_info = wow_info->phl_info;
PHL_TRACE(COMP_PHL_WOW, _PHL_INFO_, "[wow] %s : op mode %d, pwr lvl %s.\n.",
__func__, wow_info->op_mode, phl_ps_pwr_lvl_to_str(wow_info->ps_pwr_lvl));
if (wow_info->ps_pwr_lvl == PS_PWR_LVL_PWRON)
return RTW_PHL_STATUS_SUCCESS;
if (wow_info->op_mode == RTW_WOW_OP_DISCONNECT_STBY) {
/* IPS */
} else if (wow_info->op_mode == RTW_WOW_OP_CONNECT_STBY) {
pstatus = phl_ps_cfg_pwr_lvl(phl_info, PS_MODE_LPS, wow_info->ps_pwr_lvl, PS_PWR_LVL_PWRON);
} else {
PHL_ERR("%s : undefined wowlan op mode.\n", __func__);
}
return pstatus;
}
#endif
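/*
 * Translate an rtw_wow_wake_reason value into its name (without the
 * RTW_WOW_RSN_ prefix) for logging; unknown values map to "UNDEFINED".
 */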
#define case_rsn(rsn) \
case RTW_WOW_RSN_##rsn: return #rsn
const char *rtw_phl_get_wow_rsn_str(void *phl, enum rtw_wow_wake_reason wake_rsn)
{
switch (wake_rsn) {
case_rsn(UNKNOWN); /* RTW_WOW_RSN_UNKNOWN */
case_rsn(RX_PAIRWISEKEY);
case_rsn(RX_GTK);
case_rsn(RX_FOURWAY_HANDSHAKE);
case_rsn(RX_DISASSOC);
case_rsn(RX_DEAUTH);
case_rsn(RX_ARP_REQUEST);
case_rsn(RX_NS);
case_rsn(RX_EAPREQ_IDENTIFY);
case_rsn(FW_DECISION_DISCONNECT);
case_rsn(RX_MAGIC_PKT);
case_rsn(RX_UNICAST_PKT);
case_rsn(RX_PATTERN_PKT);
case_rsn(RTD3_SSID_MATCH);
case_rsn(RX_DATA_PKT);
case_rsn(RX_SSDP_MATCH);
case_rsn(RX_WSD_MATCH);
case_rsn(RX_SLP_MATCH);
case_rsn(RX_LLTD_MATCH);
case_rsn(RX_MDNS_MATCH);
case_rsn(RX_REALWOW_V2_WAKEUP_PKT);
case_rsn(RX_REALWOW_V2_ACK_LOST);
case_rsn(RX_REALWOW_V2_TX_KAPKT);
case_rsn(ENABLE_FAIL_DMA_IDLE);
case_rsn(ENABLE_FAIL_DMA_PAUSE);
case_rsn(RTIME_FAIL_DMA_IDLE);
case_rsn(RTIME_FAIL_DMA_PAUSE);
case_rsn(RX_SNMP_MISMATCHED_PKT);
case_rsn(RX_DESIGNATED_MAC_PKT);
case_rsn(NLO_SSID_MACH);
case_rsn(AP_OFFLOAD_WAKEUP);
case_rsn(DMAC_ERROR_OCCURRED);
case_rsn(EXCEPTION_OCCURRED);
case_rsn(L0_TO_L1_ERROR_OCCURRED);
case_rsn(ASSERT_OCCURRED);
case_rsn(L2_ERROR_OCCURRED);
case_rsn(WDT_TIMEOUT_WAKE);
case_rsn(RX_ACTION);
case_rsn(CLK_32K_UNLOCK);
case_rsn(CLK_32K_LOCK);
default:
return "UNDEFINED"; /* RTW_WOW_RSN_MAX */
}
}
enum rtw_phl_status rtw_phl_cfg_wow_set_sw_gpio_mode(void *phl, struct rtw_wow_gpio_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_wow_gpio_info *wow_gpio = &wow_info->wow_gpio;
FUNCIN();
wow_gpio->dev2hst_gpio = info->dev2hst_gpio;
wow_gpio->dev2hst_gpio_mode = info->dev2hst_gpio_mode;
phl_status = rtw_hal_set_sw_gpio_mode(phl_info->phl_com, phl_info->hal
, wow_gpio->dev2hst_gpio_mode, wow_gpio->dev2hst_gpio);
PHL_INFO("%s, gpio=%d, gpio_mode=%d\n", __FUNCTION__
, wow_gpio->dev2hst_gpio, wow_gpio->dev2hst_gpio_mode);
return phl_status;
}
enum rtw_phl_status rtw_phl_cfg_wow_sw_gpio_ctrl(void *phl, struct rtw_wow_gpio_info *info)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_SUCCESS;
struct phl_info_t *phl_info = (struct phl_info_t *)phl;
struct phl_wow_info *wow_info = phl_to_wow_info(phl_info);
struct rtw_wow_gpio_info *wow_gpio = &wow_info->wow_gpio;
FUNCIN();
wow_gpio->dev2hst_high = info->dev2hst_high;
phl_status = rtw_hal_sw_gpio_ctrl(phl_info->phl_com, phl_info->hal
, wow_gpio->dev2hst_high, wow_gpio->dev2hst_gpio);
PHL_INFO("%s, gpio=%d, output=%d\n", __FUNCTION__
, wow_gpio->dev2hst_gpio, wow_gpio->dev2hst_high);
return phl_status;
}
#endif /* CONFIG_WOWLAN */
|
2301_81045437/rtl8852be
|
phl/phl_wow.c
|
C
|
agpl-3.0
| 59,919
|
/******************************************************************************
*
* Copyright(c)2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_WOW_H_
#define _PHL_WOW_H_
#define WOW_HUBMSG_MAXLEN 50
#define PHL_WOW_ERR_DL_FW BIT0
#define PHL_WOW_ERR_PKT_OFLD BIT1
#define PHL_WOW_ERR_FUNC_EN BIT2
#define PHL_WOW_ERR_FUNC_DIS BIT3
#define PHL_WOW_ERR_MAC BIT4
#define PHL_WOW_ERR_TRX BIT5
#define PHL_WOW_ERR_HW BIT6
#define phl_to_wow_info(_phl) (&_phl->wow_info)
#define get_wow_pairwise_algo_type(_wow_info) (_wow_info->wow_wake_info.pairwise_sec_algo)
#define get_wow_group_algo_type(_wow_info) (_wow_info->wow_wake_info.group_sec_algo)
struct phl_wow_error {
u16 init;
u16 deinit;
};
struct phl_wow_stat {
/* init */
u8 func_en;
enum rtw_wow_op_mode op_mode;
u8 keep_alive_en;
u8 disc_det_en;
u8 arp_en;
u8 ndp_en;
u8 gtk_en;
u8 dot11w_en;
/* deinit */
enum rtw_wow_wake_reason wake_rsn;
enum rtw_mac_pwr_st mac_pwr;
/* common */
struct phl_wow_error err;
u8 aoac_rpt_fail_cnt;
};
struct phl_wow_info {
/* common */
struct phl_info_t *phl_info;
_os_lock wow_lock;
u8 wow_msg[WOW_HUBMSG_MAXLEN];
struct phl_wow_stat wow_stat;
/* general info, should reset */
u8 func_en;
struct phl_wow_error err;
struct rtw_phl_stainfo_t *sta;
enum rtw_wow_op_mode op_mode;
enum rtw_mac_pwr_st mac_pwr;
u8 ps_pwr_lvl;
/* pkt ofld token */
u32 null_pkt_token;
u32 arp_pkt_token;
u32 ndp_pkt_token;
u32 eapol_key_pkt_token;
u32 sa_query_pkt_token;
u32 kapkt_pkt_token;
u32 ack_pkt_token;
u32 wp_token;
/* func */
struct rtw_keep_alive_info keep_alive_info;
struct rtw_disc_det_info disc_det_info;
struct rtw_nlo_info nlo_info;
struct rtw_arp_ofld_info arp_ofld_info;
struct rtw_ndp_ofld_info ndp_ofld_info;
struct rtw_gtk_ofld_info gtk_ofld_info;
struct rtw_realwow_info realwow_info;
struct rtw_wow_wake_info wow_wake_info;
struct rtw_pattern_match_info pattern_match_info;
struct rtw_wow_gpio_info wow_gpio;
/* info to core */
enum rtw_wow_wake_reason wake_rsn;
struct rtw_aoac_report aoac_info;
};
enum rtw_phl_status phl_wow_mdl_init(struct phl_info_t* phl_info);
void phl_wow_mdl_deinit(struct phl_info_t* phl_info);
#ifdef CONFIG_WOWLAN
void phl_record_wow_stat(struct phl_wow_info *wow_info);
void phl_wow_handle_wake_rsn(struct phl_wow_info *wow_info, u8 *reset);
enum rtw_phl_status phl_wow_init_precfg(struct phl_wow_info *wow_info);
enum rtw_phl_status phl_wow_init_postcfg(struct phl_wow_info *wow_info);
enum rtw_phl_status phl_wow_deinit_precfg(struct phl_wow_info *wow_info);
enum rtw_phl_status phl_wow_deinit_postcfg(struct phl_wow_info *wow_info);
void phl_reset_wow_info(struct phl_wow_info *wow_info);
u8 phl_wow_nlo_exist(struct phl_wow_info *wow_info);
enum rtw_phl_status phl_wow_func_en(struct phl_wow_info *wow_info);
void phl_wow_func_dis(struct phl_wow_info *wow_info);
void phl_wow_decide_op_mode(struct phl_wow_info *wow_info, struct rtw_phl_stainfo_t *sta);
#ifdef CONFIG_POWER_SAVE
void phl_wow_ps_pctl_cfg(struct phl_wow_info *wow_info, u8 enter_wow);
void phl_wow_ps_pwr_cfg(struct phl_wow_info *wow_info, u8 enter_wow);
enum rtw_phl_status phl_wow_leave_low_power(struct phl_wow_info *wow_info);
#endif
#endif /* CONFIG_WOWLAN */
#endif /* _PHL_WOW_H_ */
|
2301_81045437/rtl8852be
|
phl/phl_wow.h
|
C
|
agpl-3.0
| 3,771
|
/******************************************************************************
*
* Copyright(c)2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_WOW_DEF_H_
#define _PHL_WOW_DEF_H_
enum rtw_wow_op_mode {
RTW_WOW_OP_NONE = 0,
RTW_WOW_OP_PWR_DOWN,
RTW_WOW_OP_DISCONNECT_STBY,
RTW_WOW_OP_CONNECT_STBY,
RTW_WOW_OP_MAX = 0xF
};
enum rtw_wow_wake_reason {
RTW_WOW_RSN_UNKNOWN = 0,
RTW_WOW_RSN_RX_PAIRWISEKEY,
RTW_WOW_RSN_RX_GTK,
RTW_WOW_RSN_RX_FOURWAY_HANDSHAKE,
RTW_WOW_RSN_RX_DISASSOC,
RTW_WOW_RSN_RX_DEAUTH, /* 5 */
RTW_WOW_RSN_RX_ARP_REQUEST,
RTW_WOW_RSN_RX_NS,
RTW_WOW_RSN_RX_EAPREQ_IDENTIFY,
RTW_WOW_RSN_FW_DECISION_DISCONNECT,
RTW_WOW_RSN_RX_MAGIC_PKT, /* 10 */
RTW_WOW_RSN_RX_UNICAST_PKT,
RTW_WOW_RSN_RX_PATTERN_PKT,
RTW_WOW_RSN_RTD3_SSID_MATCH,
RTW_WOW_RSN_RX_DATA_PKT,
RTW_WOW_RSN_RX_SSDP_MATCH, /* 15 */
RTW_WOW_RSN_RX_WSD_MATCH,
RTW_WOW_RSN_RX_SLP_MATCH,
RTW_WOW_RSN_RX_LLTD_MATCH,
RTW_WOW_RSN_RX_MDNS_MATCH,
RTW_WOW_RSN_RX_REALWOW_V2_WAKEUP_PKT, /* 20 */
RTW_WOW_RSN_RX_REALWOW_V2_ACK_LOST,
RTW_WOW_RSN_RX_REALWOW_V2_TX_KAPKT,
RTW_WOW_RSN_ENABLE_FAIL_DMA_IDLE,
RTW_WOW_RSN_ENABLE_FAIL_DMA_PAUSE,
RTW_WOW_RSN_RTIME_FAIL_DMA_IDLE, /* 25 */
RTW_WOW_RSN_RTIME_FAIL_DMA_PAUSE,
RTW_WOW_RSN_RX_SNMP_MISMATCHED_PKT,
RTW_WOW_RSN_RX_DESIGNATED_MAC_PKT,
RTW_WOW_RSN_NLO_SSID_MACH,
RTW_WOW_RSN_AP_OFFLOAD_WAKEUP, /* 30 */
RTW_WOW_RSN_DMAC_ERROR_OCCURRED,
RTW_WOW_RSN_EXCEPTION_OCCURRED,
RTW_WOW_RSN_L0_TO_L1_ERROR_OCCURRED,
RTW_WOW_RSN_ASSERT_OCCURRED,
RTW_WOW_RSN_L2_ERROR_OCCURRED, /* 35 */
RTW_WOW_RSN_WDT_TIMEOUT_WAKE,
RTW_WOW_RSN_RX_ACTION,
RTW_WOW_RSN_CLK_32K_UNLOCK,
RTW_WOW_RSN_CLK_32K_LOCK,
RTW_WOW_RSN_MAX = 0xFF
};
struct rtw_keep_alive_info {
/* core */
u8 keep_alive_en;
u8 keep_alive_period;
/* phl/hal */
u8 null_pkt_id;
};
struct rtw_disc_det_info {
/* core */
u8 disc_det_en;
u8 disc_wake_en;
u8 try_pkt_count;
u8 check_period;
u8 cnt_bcn_lost_en;
u8 cnt_bcn_lost_limit;
};
struct rtw_nlo_info {
u8 nlo_en;
u8 nlo_exist;
};
struct rtw_arp_ofld_content {
u8 arp_en;
u8 remote_ipv4_addr[IPV4_ADDRESS_LENGTH];
u8 host_ipv4_addr[IPV4_ADDRESS_LENGTH];
u8 mac_addr[MAC_ADDRESS_LENGTH];
};
struct rtw_arp_ofld_info {
u8 arp_en;
u8 arp_action; /* 0 = send arp response, 1 = wake up host */
u8 arp_rsp_id;
struct rtw_arp_ofld_content arp_ofld_content;
};
struct rtw_ndp_ofld_content {
u8 ndp_en;
u8 chk_remote_ip;
u8 num_target_ip;
u8 mac_addr[MAC_ADDRESS_LENGTH];
u8 remote_ipv6_addr[IPV6_ADDRESS_LENGTH];
u8 target_ipv6_addr[2][IPV6_ADDRESS_LENGTH];
};
struct rtw_ndp_ofld_info {
u8 ndp_en;
u8 ndp_id;
struct rtw_ndp_ofld_content ndp_ofld_content[2];
};
struct rtw_gtk_ofld_content {
u8 kck[32];
u32 kck_len;
u8 kek[32];
u32 kek_len;
u8 tk1[16];
u8 txmickey[8];
u8 rxmickey[8];
u8 replay_cnt[8];
u8 igtk_keyid[4];
u8 ipn[8];
u8 igtk[2][32];
u8 igtk_len;
u8 psk[32];
u8 psk_len;
};
struct rtw_gtk_ofld_info {
/* core */
u8 gtk_en;
u8 tkip_en;
u8 ieee80211w_en;
u8 pairwise_wakeup;
u8 bip_sec_algo;
u8 akmtype_byte3;
struct rtw_gtk_ofld_content gtk_ofld_content;
/* phl */
u8 hw_11w_en; /* keep 1 for BIP-CMAC-128 so far */
u8 gtk_rsp_id; /* eapol pkt id */
u8 sa_query_id;
};
#define MAX_WOW_PATTERN_SIZE_BIT 128
#define MAX_WOW_PATTERN_SIZE_BYTE 16
#define MAX_WOW_PATTERN_SIZE_DWORD 4
struct rtw_wowcam_upd_info {
u8 rw;
u8 wow_cam_idx;
u32 wake_mask[4];
u16 match_crc;
u8 is_negative_pattern_match;
u8 skip_mac_hdr;
u8 uc;
u8 mc;
u8 bc;
u8 valid;
u8 ptrn[MAX_WOW_PATTERN_SIZE_BIT];
u32 ptrn_len;
u8 mask[MAX_WOW_PATTERN_SIZE_BYTE];
};
#define MAX_WOW_CAM_NUM 18
struct rtw_pattern_match_info{
struct rtw_wowcam_upd_info wowcam_info[MAX_WOW_CAM_NUM];
};
#define MAX_REALWOW_KCP_SIZE 124 /* (100 + 24) */
#define MAX_REALWOW_PAYLOAD 64
struct rtw_realwow_ofld_content {
u16 interval; /* unit : 1 ms */
u16 keep_alive_pkt_size;
u16 ack_lost_limit;
u16 ack_ptrn_size;
u16 wakeup_ptrn_size;
u16 keep_alive_pkt_ptrn[MAX_REALWOW_KCP_SIZE];
u8 ack_ptrn[MAX_REALWOW_PAYLOAD];
u8 wakeup_ptrn[MAX_REALWOW_PAYLOAD];
u32 wakeup_sec_num;
};
struct rtw_realwow_info {
u8 realwow_en;
u8 auto_wakeup;
u8 keepalive_id;
u8 wakeup_pattern_id;
u8 ack_pattern_id;
struct rtw_realwow_ofld_content realwow_ofld_content;
};
struct rtw_wow_gpio_info {
u8 dev2hst_gpio_en;
u8 disable_inband;
u8 gpio_output_input;
u8 gpio_active;
u8 toggle_pulse;
u8 data_pin_wakeup;
u8 gpio_pulse_nonstop;
u8 gpio_time_unit;
u8 gpio_num;
u8 gpio_pulse_dura;
u8 gpio_pulse_period;
u8 gpio_pulse_count;
u8 customer_id;
u8 gpio_pulse_en_a;
u8 gpio_duration_unit_a;
u8 gpio_pulse_nonstop_a;
u8 special_reason_a;
u8 gpio_duration_a;
u8 gpio_pulse_count_a;
enum rtw_gpio_mode dev2hst_gpio_mode;
u8 dev2hst_gpio;
u8 dev2hst_high;
};
struct rtw_remote_wake_ctrl_info {
/* core */
u8 ptk_tx_iv[IV_LENGTH];
u8 valid_check;
u8 symbol_check_en;
u8 gtk_key_idx;
u8 ptk_rx_iv[IV_LENGTH];
u8 gtk_rx_iv_idx0[IV_LENGTH];
u8 gtk_rx_iv_idx1[IV_LENGTH];
u8 gtk_rx_iv_idx2[IV_LENGTH];
u8 gtk_rx_iv_idx3[IV_LENGTH];
};
struct rtw_wow_wake_info {
/* core */
u8 wow_en;
u8 drop_all_pkt;
u8 rx_parse_after_wake;
u8 pairwise_sec_algo;
u8 group_sec_algo;
u8 bip_sec_algo;
u8 pattern_match_en;
u8 magic_pkt_en;
u8 hw_unicast_en;
u8 fw_unicast_en;
u8 deauth_wakeup;
u8 rekey_wakeup;
u8 eap_wakeup;
u8 all_data_wakeup;
struct rtw_remote_wake_ctrl_info remote_wake_ctrl_info;
};
struct rtw_aoac_report {
/* status check */
u8 rpt_fail;
/* report from fw */
u8 rpt_ver;
u8 sec_type;
u8 key_idx;
u8 pattern_idx;
u8 rekey_ok;
u8 ptk_tx_iv[IV_LENGTH];
u8 eapol_key_replay_count[8];
u8 gtk[32];
u8 ptk_rx_iv[IV_LENGTH];
u8 gtk_rx_iv[4][IV_LENGTH];
u8 igtk_key_id[8];
u8 igtk_ipn[8];
u8 igtk[32];
};
#ifdef CONFIG_WOWLAN
/* Exported APIs to core */
enum rtw_phl_status rtw_phl_cfg_keep_alive_info(void *phl, struct rtw_keep_alive_info *info);
enum rtw_phl_status rtw_phl_cfg_disc_det_info(void *phl, struct rtw_disc_det_info *info);
enum rtw_phl_status rtw_phl_cfg_nlo_info(void *phl, struct rtw_nlo_info *info);
void rtw_phl_cfg_arp_ofld_info(void *phl, struct rtw_arp_ofld_info *info);
void rtw_phl_cfg_ndp_ofld_info(void *phl, struct rtw_ndp_ofld_info *info);
enum rtw_phl_status rtw_phl_remove_wow_ptrn_info(void *phl, u8 phl_ptrn_id);
enum rtw_phl_status rtw_phl_add_wow_ptrn_info(void *phl, struct rtw_wowcam_upd_info *info, u8 *phl_ptrn_id);
enum rtw_phl_status rtw_phl_cfg_gtk_ofld_info(void *phl, struct rtw_gtk_ofld_info *info);
enum rtw_phl_status rtw_phl_cfg_realwow_info(void *phl, struct rtw_realwow_info *info);
enum rtw_phl_status rtw_phl_cfg_wow_wake(void *phl, struct rtw_wow_wake_info *info);
enum rtw_phl_status rtw_phl_cfg_gpio_wake_pulse(void *phl, struct rtw_wow_gpio_info *info);
const char *rtw_phl_get_wow_rsn_str(void *phl, enum rtw_wow_wake_reason wake_rsn);
enum rtw_phl_status rtw_phl_cfg_wow_set_sw_gpio_mode(void *phl, struct rtw_wow_gpio_info *info);
enum rtw_phl_status rtw_phl_cfg_wow_sw_gpio_ctrl(void *phl, struct rtw_wow_gpio_info *info);
#endif /* CONFIG_WOWLAN */
#endif /* _PHL_WOW_DEF_H_ */
|
2301_81045437/rtl8852be
|
phl/phl_wow_def.h
|
C
|
agpl-3.0
| 7,603
|
/******************************************************************************
*
* Copyright(c) 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#pragma once
#ifndef PHL_WPP_H
#define PHL_WPP_H
#define PHL_WPP_CONTROL_GUID\
WPP_DEFINE_CONTROL_GUID( \
rtPhlGuid, (16D085D0,69AD,41FD,94E1,77667C0C6664), \
WPP_DEFINE_BIT(COMP_PHL_DBG) \
WPP_DEFINE_BIT(COMP_PHL_RECV) \
WPP_DEFINE_BIT(COMP_PHL_XMIT) \
WPP_DEFINE_BIT(COMP_PHL_MAC) \
WPP_DEFINE_BIT(COMP_PHL_SOUND)\
WPP_DEFINE_BIT(COMP_PHL_WOW)\
WPP_DEFINE_BIT(COMP_PHL_TRIG)\
WPP_DEFINE_BIT(COMP_PHL_PKTOFLD)\
WPP_DEFINE_BIT(COMP_PHL_FSM)\
WPP_DEFINE_BIT(COMP_PHL_PS)\
WPP_DEFINE_BIT(COMP_PHL_PSTS)\
WPP_DEFINE_BIT(COMP_PHL_LED)\
WPP_DEFINE_BIT(COMP_PHL_BB)\
WPP_DEFINE_BIT(COMP_PHL_RF)\
WPP_DEFINE_BIT(COMP_PHL_MCC)\
WPP_DEFINE_BIT(COMP_PHL_P2PPS)\
WPP_DEFINE_BIT(COMP_PHL_ECSA)\
WPP_DEFINE_BIT(COMP_PHL_CMDDISP)\
WPP_DEFINE_BIT(COMP_PHL_BTC)\
WPP_DEFINE_BIT(COMP_PHL_TWT)\
) \
HALMAC_WPP_CONTROL_GUIDS \
HALBB_WPP_CONTROL_GUIDS
//HALRF_WPP_CONTROL_GUIDS
/*
 * Note that the comment blocks that begin with "begin_wpp" and end with "end_wpp"
 * will be scanned by the trace preprocessor to define our trace functions.
*/
#undef WPP_COMPID_LEVEL_ENABLED
#define WPP_COMPID_LEVEL_ENABLED(COMPID, LEVEL) \
((WPP_CONTROL(WPP_BIT_ ## COMPID).Level >= LEVEL) && \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Flags[WPP_FLAG_NO(WPP_BIT_ ## COMPID)] & \
WPP_MASK(WPP_BIT_ ## COMPID)))
#undef WPP_COMPID_LEVEL_LOGGER
#define WPP_COMPID_LEVEL_LOGGER(COMPID, LEVEL) \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Logger),
#undef WPP_COMPID_LEVEL__sts_ENABLED
#define WPP_COMPID_LEVEL__sts_ENABLED(COMPID, LEVEL, _sts) \
((WPP_CONTROL(WPP_BIT_ ## COMPID).Level >= LEVEL) && \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Flags[WPP_FLAG_NO(WPP_BIT_ ## COMPID)] & \
WPP_MASK(WPP_BIT_ ## COMPID)))
#undef WPP_COMPID_LEVEL__sts_LOGGER
#define WPP_COMPID_LEVEL__sts_LOGGER(COMPID, LEVEL, _sts) \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Logger),
/*
* begin_wpp config
*
* USEPREFIX (PHL_TRACE, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC PHL_TRACE{}(COMPID, LEVEL, MSG,...);
*
* FUNC PHL_DATA{}(COMPID, LEVEL, MSG,...);
*
* USEPREFIX (PHL_ERR, "%!STDPREFIX! %s ERROR\t", PHL_PREFIX);
* FUNC PHL_ERR{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_ERROR}(MSG,...);
*
* USEPREFIX (PHL_PRINT, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC PHL_PRINT{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_CRITICAL}(MSG,...);
*
* USEPREFIX (PHL_WARN, "%!STDPREFIX! %s WARN\t", PHL_PREFIX);
* FUNC PHL_WARN{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_WARNING}(MSG,...);
*
* USEPREFIX (PHL_INFO, "%!STDPREFIX! %s INFO\t", PHL_PREFIX);
* FUNC PHL_INFO{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_INFORMATION}(MSG,...);
*
* USEPREFIX (PHL_DBG, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC PHL_DBG{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_VERBOSE}(MSG,...);
*
* USEPREFIX (FUNCIN, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC FUNCIN{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_VERBOSE}();
* USESUFFIX(FUNCIN, "Enter %!FUNC!");
*
* USEPREFIX (FUNCOUT, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC FUNCOUT{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_VERBOSE}();
* USESUFFIX(FUNCOUT, "Leave %!FUNC!");
*
* CUSTOM_TYPE(RTW_PHL_STATUS, ItemEnum(rtw_phl_status) );
* USEPREFIX (FUNCIN_WSTS, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC FUNCIN_WSTS{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_VERBOSE}(_sts);
* USESUFFIX(FUNCIN_WSTS, "Enter with %!RTW_PHL_STATUS! %!FUNC!", _sts);
*
* USEPREFIX (FUNCOUT_WSTS, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC FUNCOUT_WSTS{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_VERBOSE}(_sts);
* USESUFFIX(FUNCOUT_WSTS, "Leave with %!RTW_PHL_STATUS! %!FUNC!", _sts);
*
* USEPREFIX (BB_TRACE, "%!STDPREFIX! [BB]\t");
* FUNC BB_TRACE{COMPID=COMP_PHL_BB, LEVEL=TRACE_LEVEL_INFORMATION}(MSG,...);
*
* USEPREFIX (BB_WARNING, "%!STDPREFIX! [BB][WARN]\t");
* FUNC BB_WARNING{COMPID=COMP_PHL_BB, LEVEL=TRACE_LEVEL_WARNING}(MSG,...);
*
* USEPREFIX (RF_TRACE, "%!STDPREFIX! [RF]\t");
* FUNC RF_TRACE{COMPID=COMP_PHL_RF, LEVEL=TRACE_LEVEL_INFORMATION}(MSG,...);
*
* USEPREFIX (RF_WARNING, "%!STDPREFIX! [RF][WARN]\t");
* FUNC RF_WARNING{COMPID=COMP_PHL_RF, LEVEL=TRACE_LEVEL_WARNING}(MSG,...);
*
* end_wpp
*/
/*
* begin_wpp config
*
* USEPREFIX (PLTFM_MSG_ERR, "%!STDPREFIX! [MAC][ERR]\t");
* FUNC PLTFM_MSG_ERR{COMPID=COMP_PHL_MAC, LEVEL=TRACE_LEVEL_ERROR}(MSG,...);
*
* USEPREFIX (PLTFM_MSG_ALWAYS, "%!STDPREFIX! [MAC]\t");
* FUNC PLTFM_MSG_ALWAYS{COMPID=COMP_PHL_MAC, LEVEL=TRACE_LEVEL_CRITICAL}(MSG,...);
*
* USEPREFIX (PLTFM_MSG_WARN, "%!STDPREFIX! [MAC][WARN]\t");
* FUNC PLTFM_MSG_WARN{COMPID=COMP_PHL_MAC, LEVEL=TRACE_LEVEL_WARNING}(MSG,...);
*
* USEPREFIX (PLTFM_MSG_TRACE, "%!STDPREFIX! [MAC][TRACE]\t");
* FUNC PLTFM_MSG_TRACE{COMPID=COMP_PHL_MAC, LEVEL=TRACE_LEVEL_INFORMATION}(MSG,...);
*
* end_wpp
*/
#ifdef CONFIG_FSM
#undef WPP_COMPID_LEVEL_FSM_ENABLED
#define WPP_COMPID_LEVEL_FSM_ENABLED(COMPID, LEVEL, FSM) \
((!FSM || phl_fsm_dbg_level(FSM, LEVEL) || \
LEVEL == TRACE_LEVEL_ERROR || LEVEL == TRACE_LEVEL_WARNING) && \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Level >= LEVEL) && \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Flags[WPP_FLAG_NO(WPP_BIT_ ## COMPID)] & \
WPP_MASK(WPP_BIT_ ## COMPID)))
#undef WPP_COMPID_LEVEL_FSM_LOGGER
#define WPP_COMPID_LEVEL_FSM_LOGGER(COMPID, LEVEL, FSM) \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Logger),
#endif
/*
* begin_wpp config
*
* USEPREFIX (FSM_ERR, "%!STDPREFIX! %s ERROR\t", PHL_PREFIX);
* FUNC FSM_ERR{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_ERROR}(FSM,MSG,...);
*
* USEPREFIX (FSM_WARN, "%!STDPREFIX! %s WARN\t", PHL_PREFIX);
* FUNC FSM_WARN{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_WARNING}(FSM,MSG,...);
*
* USEPREFIX (FSM_PRINT, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC FSM_PRINT{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_CRITICAL}(FSM,MSG,...);
*
* USEPREFIX (FSM_INFO, "%!STDPREFIX! %s INFO\t", PHL_PREFIX);
* FUNC FSM_INFO{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_INFORMATION}(FSM,MSG,...);
*
* USEPREFIX (FSM_DBG, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC FSM_DBG{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_VERBOSE}(FSM,MSG,...);
*
* end_wpp
*/
#undef WPP_COMPID_LEVEL_opsfun_ENABLED
#define WPP_COMPID_LEVEL_opsfun_ENABLED(COMPID, LEVEL, opsfun) \
((WPP_CONTROL(WPP_BIT_ ## COMPID).Level >= LEVEL) && \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Flags[WPP_FLAG_NO(WPP_BIT_ ## COMPID)] & \
WPP_MASK(WPP_BIT_ ## COMPID)))
#undef WPP_COMPID_LEVEL_opsfun_LOGGER
#define WPP_COMPID_LEVEL_opsfun_LOGGER(COMPID, LEVEL, opsfun) \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Logger),
/*
* begin_wpp config
*
* USEPREFIX (phl_ops_error_msg, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC phl_ops_error_msg{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_ERROR}(opsfun);
* USESUFFIX(phl_ops_error_msg, "### %!FUNC! - Please hook phl_hci_ops.%s ###\n", opsfun);
*
* USEPREFIX (hal_error_msg, "%!STDPREFIX! %s", PHL_PREFIX);
* FUNC hal_error_msg{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_ERROR}(opsfun);
* USESUFFIX(hal_error_msg, "### %!FUNC! - Error : Please hook hal_ops.%s ###\n", opsfun);
*
* end_wpp
*/
/*
* Define the 'xstr' structure for logging buffer and length pairs
* and the 'log_xstr' function which returns it to create one in-place.
 * This enables logging of complex data types.
*/
typedef struct xstr { char * _buf; size_t _len; } xstr_t;
__inline xstr_t log_xstr(void * p, size_t l)
{
xstr_t xs;
xs._buf = (char *)p;
xs._len = l;
return xs;
}
#define WPP_LOGHEXDUMP(x) WPP_LOGPAIR(2, &((x)._len)) WPP_LOGPAIR((x)._len, (x)._buf)
#undef WPP_COMPID_LEVEL_BUF_LEN_PREFIX_ENABLED
#define WPP_COMPID_LEVEL_BUF_LEN_PREFIX_ENABLED(COMPID, LEVEL, BUF, LEN, PREFIX) \
((WPP_CONTROL(WPP_BIT_ ## COMPID).Level >= LEVEL) && \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Flags[WPP_FLAG_NO(WPP_BIT_ ## COMPID)] & \
WPP_MASK(WPP_BIT_ ## COMPID)))
#undef WPP_COMPID_LEVEL_BUF_LEN_PREFIX_LOGGER
#define WPP_COMPID_LEVEL_BUF_LEN_PREFIX_LOGGER(COMPID, LEVEL, BUF, LEN, PREFIX) \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Logger),
#define WPP_COMPID_LEVEL_BUF_LEN_PREFIX_PRE(COMPID, LEVEL, BUF, LEN, PREFIX) \
{ if (BUF != NULL) {
#define WPP_COMPID_LEVEL_BUF_LEN_PREFIX_POST(COMPID, LEVEL, BUF, LEN, PREFIX) \
/* TraceMessage() */; }}
/*
* begin_wpp config
*
* DEFINE_CPLX_TYPE(HEXDUMP, WPP_LOGHEXDUMP, xstr_t, ItemHEXDump, "s", _HEX_, 0, 2);
* USEPREFIX (debug_dump_data, "%!STDPREFIX! %s\t[debug dump] %s", PHL_PREFIX, PREFIX);
* FUNC debug_dump_data{COMPID=COMP_PHL_DBG, LEVEL=TRACE_LEVEL_INFORMATION}(BUF,LEN, PREFIX);
* USESUFFIX(debug_dump_data, "%!HEXDUMP!", log_xstr(BUF, LEN));
*
* end_wpp
*/
#undef WPP_LEVEL_BB_COMPID_ENABLED
#define WPP_LEVEL_BB_COMPID_ENABLED(LEVEL, BB, COMPID) \
((WPP_CONTROL(WPP_BIT_ ## COMPID).Level >= LEVEL) && \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Flags[WPP_FLAG_NO(WPP_BIT_ ## COMPID)] & \
WPP_MASK(WPP_BIT_ ## COMPID)))
#undef WPP_LEVEL_BB_COMPID_LOGGER
#define WPP_LEVEL_BB_COMPID_LOGGER(LEVEL, BB, COMPID) \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Logger),
/*
* begin_wpp config
*
* USEPREFIX (BB_DBG, "%!STDPREFIX! [BB]\t");
* FUNC BB_DBG{LEVEL=TRACE_LEVEL_INFORMATION}(BB, COMPID, MSG,...);
*
* end_wpp
*/
#undef WPP_LEVEL_RF_COMPID_ENABLED
#define WPP_LEVEL_RF_COMPID_ENABLED(LEVEL, RF, COMPID) \
((WPP_CONTROL(WPP_BIT_ ## COMPID).Level >= LEVEL) && \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Flags[WPP_FLAG_NO(WPP_BIT_ ## COMPID)] & \
WPP_MASK(WPP_BIT_ ## COMPID)))
#undef WPP_LEVEL_RF_COMPID_LOGGER
#define WPP_LEVEL_RF_COMPID_LOGGER(LEVEL, RF, COMPID) \
(WPP_CONTROL(WPP_BIT_ ## COMPID).Logger),
/*
* begin_wpp config
*
* USEPREFIX (RF_DBG, "%!STDPREFIX! [RF]\t");
* FUNC RF_DBG{LEVEL=TRACE_LEVEL_INFORMATION}(RF, COMPID, MSG,...);
*
* end_wpp
*/
#endif
|
2301_81045437/rtl8852be
|
phl/phl_wpp.h
|
C
|
agpl-3.0
| 10,223
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PLTFM_OPS_H_
#define _PLTFM_OPS_H_
#ifdef PHL_PLATFORM_WINDOWS
#include "pltfm_ops_windows.h"
#elif defined(PHL_PLATFORM_LINUX)
#include "pltfm_ops_linux.h"
#elif defined(PHL_PLATFORM_MACOS)
#include "pltfm_ops_macos.h"
#else
#include "pltfm_ops_none.h"
#endif
#endif /*_PLTFM_OPS_H_*/
|
2301_81045437/rtl8852be
|
phl/pltfm_ops.h
|
C
|
agpl-3.0
| 946
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PLTFM_OPS_LINUX_H_
#define _PLTFM_OPS_LINUX_H_
#include "drv_types.h"
static inline char *_os_strpbrk(const char *s, const char *ct)
{
return strpbrk(s, ct);
}
static inline char *_os_strsep(char **s, const char *ct)
{
return strsep(s, ct);
}
#if 1
#define _os_sscanf(buf, fmt, ...) sscanf(buf, fmt, ##__VA_ARGS__)
#else
static inline int _os_sscanf(const char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsscanf(buf, fmt, args);
va_end(args);
return i;
}
#endif
static inline int _os_strcmp(const char *s1, const char *s2)
{
return strcmp(s1, s2);
}
static inline int _os_strncmp(const char *s1, const char *s2, size_t n)
{
return strncmp(s1, s2, n);
}
static inline char *_os_strcpy(char *dest, const char *src)
{
return strcpy(dest, src);
}
static inline char *_os_strncpy(char *dest, const char *src, size_t n)
{
return strncpy(dest, src, n);
}
#if 1
#define _os_strchr(s, c) strchr(s, c)
#else
static inline char*_os_strchr(const char *s, int c)
{
return strchr(s, c);
}
#endif
#if 1
#define _os_snprintf(s, sz, fmt, ...) snprintf(s, sz, fmt, ##__VA_ARGS__)
#define _os_vsnprintf(str, size, fmt, args) vsnprintf(str, size, fmt, args)
#else
static int _os_snprintf(char *str, size_t size, const char *fmt, ...)
{
va_list args;
int ret;
va_start(args, fmt);
ret = vsnprintf(str, size, fmt, args);
va_end(args);
if (size > 0)
str[size - 1] = '\0';
return ret;
}
#endif
static inline u32 _os_strlen(u8 *buf)
{
return strlen(buf);
}
static inline void _os_delay_ms(void *d, u32 ms)
{
rtw_mdelay_os(ms);
}
static inline void _os_delay_us(void *d, u32 us)
{
rtw_udelay_os(us);
}
static inline void _os_sleep_ms(void *d, u32 ms)
{
rtw_msleep_os(ms);
}
static inline void _os_sleep_us(void *d, u32 us)
{
rtw_usleep_os(us);
}
static inline u32 _os_get_cur_time_us(void)
{
return rtw_systime_to_us(rtw_get_current_time());
}
static inline u32 _os_get_cur_time_ms(void)
{
return rtw_systime_to_ms(rtw_get_current_time());
}
static inline u64 _os_modular64(u64 x, u64 y)
{
/*return do_div(x, y);*/
return rtw_modular64(x, y);
}
static inline u64 _os_division64(u64 x, u64 y)
{
/*return do_div(x, y);*/
return rtw_division64(x, y);
}
static inline u32 _os_div_round_up(u32 x, u32 y)
{
return RTW_DIV_ROUND_UP(x, y);
}
#ifdef CONFIG_PCI_HCI
static inline void _os_cache_inv(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz, u8 direction)
{
struct dvobj_priv *pobj = (struct dvobj_priv *)d;
PPCI_DATA pci_data = dvobj_to_pci(pobj);
struct pci_dev *pdev = pci_data->ppcidev;
pci_cache_inv(pdev, bus_addr_l, buf_sz, direction);
}
static inline void _os_cache_wback(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz, u8 direction)
{
struct dvobj_priv *pobj = (struct dvobj_priv *)d;
PPCI_DATA pci_data = dvobj_to_pci(pobj);
struct pci_dev *pdev = pci_data->ppcidev;
pci_cache_wback(pdev, bus_addr_l, buf_sz, direction);
}
/* txbd, rxbd, wd */
static inline void *_os_shmem_alloc(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz,
u8 cache, u8 direction, void **os_rsvd)
{
struct dvobj_priv *pobj = (struct dvobj_priv *)d;
PPCI_DATA pci_data = dvobj_to_pci(pobj);
struct pci_dev *pdev = pci_data->ppcidev;
if(cache)
return pci_alloc_cache_mem(pdev, bus_addr_l, buf_sz, direction);
else
return pci_alloc_noncache_mem(pdev, bus_addr_l, buf_sz);
}
static inline void _os_shmem_free(void *d, u8 *vir_addr, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz,
u8 cache, u8 direction, void *os_rsvd)
{
struct dvobj_priv *pobj = (struct dvobj_priv *)d;
PPCI_DATA pci_data = dvobj_to_pci(pobj);
struct pci_dev *pdev = pci_data->ppcidev;
	if (cache)
		pci_free_cache_mem(pdev, vir_addr, bus_addr_l, buf_sz, direction);
	else
		pci_free_noncache_mem(pdev, vir_addr, bus_addr_l, buf_sz);
}
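/*
 * Illustrative sketch (not part of the driver): how a caller might pair the
 * shared-memory helpers above to hold a DMA ring (e.g. txbd/rxbd). The
 * function names, ring size and direction value are hypothetical.
 */
#if 0
static inline void *alloc_ring_example(void *d, _dma *bus_l, _dma *bus_h,
					u32 ring_sz, void **os_rsvd)
{
	/* cache = 0 requests a non-cached (coherent) buffer, so the caller
	 * does not need explicit _os_cache_wback()/_os_cache_inv() calls */
	return _os_shmem_alloc(d, bus_l, bus_h, ring_sz, 0, 0, os_rsvd);
}

static inline void free_ring_example(void *d, u8 *vaddr, _dma *bus_l,
					_dma *bus_h, u32 ring_sz, void *os_rsvd)
{
	_os_shmem_free(d, vaddr, bus_l, bus_h, ring_sz, 0, 0, os_rsvd);
}
#endif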
#endif /*CONFIG_PCI_HCI*/
static inline void *_os_pkt_buf_unmap_rx(void *d, _dma bus_addr_l, _dma bus_addr_h, u32 buf_sz)
{
struct dvobj_priv *pobj = (struct dvobj_priv *)d;
#ifdef CONFIG_PCI_HCI
PPCI_DATA pci_data = dvobj_to_pci(pobj);
struct pci_dev *pdev = pci_data->ppcidev;
#endif /*CONFIG_PCI_HCI*/
#ifdef CONFIG_PCI_HCI
pci_unmap_single(pdev, bus_addr_l, buf_sz, PCI_DMA_FROMDEVICE);
#endif
#ifdef RTW_CORE_RECORD
phl_add_record(d, REC_RX_UNMAP, bus_addr_l, buf_sz);
#endif
return NULL;
}
static inline void *_os_pkt_buf_map_rx(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
u32 buf_sz, void *os_priv)
{
struct dvobj_priv *pobj = (struct dvobj_priv *)d;
#ifdef CONFIG_PCI_HCI
PPCI_DATA pci_data = dvobj_to_pci(pobj);
struct pci_dev *pdev = pci_data->ppcidev;
struct sk_buff *skb = os_priv;
*bus_addr_l = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
/* *bus_addr_h = NULL;*/
#endif /*CONFIG_PCI_HCI*/
return NULL;
}
/* rxbuf */
#define PHL_RX_HEADROOM 50
static inline void *_os_pkt_buf_alloc_rx(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz, void **os_priv)
{
struct dvobj_priv *pobj = (struct dvobj_priv *)d;
#ifdef CONFIG_PCI_HCI
PPCI_DATA pci_data = dvobj_to_pci(pobj);
struct pci_dev *pdev = pci_data->ppcidev;
#endif /*CONFIG_PCI_HCI*/
struct sk_buff *skb = NULL;
u32 rxbuf_size = buf_sz + PHL_RX_HEADROOM;
skb = rtw_skb_alloc(rxbuf_size);
if (!skb)
return NULL;
skb_pull(skb, PHL_RX_HEADROOM);
#ifdef CONFIG_PCI_HCI
*bus_addr_l = pci_map_single(pdev, skb->data, rxbuf_size, PCI_DMA_FROMDEVICE);
/* *bus_addr_h = NULL;*/
#endif /*CONFIG_PCI_HCI*/
*os_priv = skb;
return skb->data;
}
static inline void _os_pkt_buf_free_rx(void *d, u8 *vir_addr, _dma bus_addr_l,
_dma bus_addr_h, u32 buf_sz, void *os_priv)
{
struct dvobj_priv *pobj = (struct dvobj_priv *)d;
#ifdef CONFIG_PCI_HCI
PPCI_DATA pci_data = dvobj_to_pci(pobj);
struct pci_dev *pdev = pci_data->ppcidev;
#endif /*CONFIG_PCI_HCI*/
struct sk_buff *skb = (struct sk_buff *)os_priv;
#ifdef CONFIG_PCI_HCI
pci_unmap_single(pdev, bus_addr_l, buf_sz, PCI_DMA_FROMDEVICE);
#endif /*CONFIG_PCI_HCI*/
rtw_skb_free(skb);
}
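/*
 * Illustrative sketch (not part of the driver): pairing _os_pkt_buf_alloc_rx()
 * with _os_pkt_buf_free_rx(). The rx_buf_example structure and function names
 * are hypothetical; the real PHL keeps this state in its own rx ring objects.
 */
#if 0
struct rx_buf_example {
	u8 *vir_addr;
	_dma bus_addr_l;
	_dma bus_addr_h;
	u32 buf_sz;
	void *os_priv;	/* struct sk_buff * on Linux */
};

static inline int refill_rx_buf_example(void *d, struct rx_buf_example *rb, u32 sz)
{
	rb->buf_sz = sz;
	rb->vir_addr = _os_pkt_buf_alloc_rx(d, &rb->bus_addr_l, &rb->bus_addr_h,
					    sz, &rb->os_priv);
	return rb->vir_addr ? 0 : -1;	/* NULL means skb alloc or map failed */
}

static inline void release_rx_buf_example(void *d, struct rx_buf_example *rb)
{
	/* unmaps the DMA mapping (PCI) and frees the skb */
	_os_pkt_buf_free_rx(d, rb->vir_addr, rb->bus_addr_l, rb->bus_addr_h,
			    rb->buf_sz, rb->os_priv);
}
#endif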
/* phl pre-alloc network layer buffer */
static inline void * _os_alloc_netbuf(void *d, u32 buf_sz, void **os_priv)
{
return _os_pkt_buf_alloc_rx(d, NULL, NULL, buf_sz, os_priv);
}
/* Free netbuf for error case. (ex. drop rx-reorder packet) */
static inline void _os_free_netbuf(void *d, u8 *vir_addr, u32 buf_sz, void *os_priv)
{
_os_pkt_buf_free_rx(d, vir_addr, 0,0, buf_sz, os_priv);
}
/*virtually contiguous memory*/
static inline void *_os_mem_alloc(void *d, u32 buf_sz)
{
#ifdef DBG_PHL_MEM_ALLOC
struct dvobj_priv *obj = (struct dvobj_priv *)d;
ATOMIC_ADD_RETURN(&obj->phl_mem, buf_sz);
#endif
#ifdef CONFIG_PHL_USE_KMEM_ALLOC
return rtw_zmalloc(buf_sz);
#else
if (in_atomic()) {
RTW_ERR("Call rtw_zvmalloc in atomic @%s:%u\n",
__FUNCTION__, __LINE__);
dump_stack();
}
return rtw_zvmalloc(buf_sz);
#endif
}
/*virtually contiguous memory*/
static inline void _os_mem_free(void *d, void *buf, u32 buf_sz)
{
#ifdef DBG_PHL_MEM_ALLOC
struct dvobj_priv *obj = (struct dvobj_priv *)d;
ATOMIC_SUB(&obj->phl_mem, buf_sz);
#endif
#ifdef CONFIG_PHL_USE_KMEM_ALLOC
rtw_mfree(buf, buf_sz);
#else
if (in_atomic()) {
RTW_ERR("Call rtw_vmfree in atomic @%s:%u\n",
__FUNCTION__, __LINE__);
dump_stack();
}
rtw_vmfree(buf, buf_sz);
#endif
}
/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static inline void *_os_kmem_alloc(void *d, u32 buf_sz)
{
#ifdef DBG_PHL_MEM_ALLOC
struct dvobj_priv *obj = (struct dvobj_priv *)d;
ATOMIC_ADD_RETURN(&obj->phl_mem, buf_sz);
#endif
return rtw_zmalloc(buf_sz);
}
/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static inline void _os_kmem_free(void *d, void *buf, u32 buf_sz)
{
#ifdef DBG_PHL_MEM_ALLOC
struct dvobj_priv *obj = (struct dvobj_priv *)d;
ATOMIC_SUB(&obj->phl_mem, buf_sz);
#endif
rtw_mfree(buf, buf_sz);
}
static inline void _os_mem_set(void *d, void *buf, s8 value, u32 size)
{
_rtw_memset(buf, value, size);
}
static inline void _os_mem_cpy(void *d, void *dest, void *src, u32 size)
{
_rtw_memcpy(dest, src, size);
}
/*Return Value
* <0 :the first byte that does not match in both memory blocks has a lower value in ptr1 than in ptr2 (if evaluated as unsigned char values)
* 0 :the contents of both memory blocks are equal
* >0 :the first byte that does not match in both memory blocks has a greater value in ptr1 than in ptr2 (if evaluated as unsigned char values)
*/
static inline int _os_mem_cmp(void *d, const void *dest, const void *src, size_t size)
{
return memcmp(dest, src, size);
}
static inline void _os_init_timer(void *d, _os_timer *timer,
void (*call_back_func)(void *context), void *context,
const char *sz_id)
{
_init_timer(timer, call_back_func, context);
}
static inline void _os_set_timer(void *d, _os_timer *timer, u32 ms_delay)
{
_set_timer(timer, ms_delay);
}
static inline void _os_cancel_timer(void *d, _os_timer *timer)
{
_cancel_timer_ex(timer);
}
static inline void _os_cancel_timer_async(void *d, _os_timer *timer)
{
_cancel_timer_async(timer);
}
static inline void _os_release_timer(void *d, _os_timer *timer)
{
}
static inline void _os_mutex_init(void *d, _os_mutex *mutex)
{
_rtw_mutex_init(mutex);
}
static inline void _os_mutex_deinit(void *d, _os_mutex *mutex)
{
_rtw_mutex_free(mutex);
}
static inline void _os_mutex_lock(void *d, _os_mutex *mutex)
{
_rtw_mutex_lock_interruptible(mutex);
}
static inline void _os_mutex_unlock(void *d, _os_mutex *mutex)
{
_rtw_mutex_unlock(mutex);
}
static inline void _os_sema_init(void *d, _os_sema *sema, int int_cnt)
{
_rtw_init_sema(sema, int_cnt);
}
static inline void _os_sema_free(void *d, _os_sema *sema)
{
_rtw_free_sema(sema);
}
static inline void _os_sema_up(void *d, _os_sema *sema)
{
_rtw_up_sema(sema);
}
static inline u8 _os_sema_down(void *d, _os_sema *sema)
{
_rtw_down_sema(sema);
return 0; //success
}
/* event */
static __inline void _os_event_init(void *h, _os_event *event)
{
init_completion(event);
}
static __inline void _os_event_free(void *h, _os_event *event)
{
}
static __inline void _os_event_reset(void *h, _os_event *event)
{
/* TODO */
}
static __inline void _os_event_set(void *h, _os_event *event)
{
complete(event);
}
/*
* m_sec
* == 0 : wait for completion
* > 0 : wait for timeout or completion
* return value
* 0:timeout
* otherwise:success
*/
static __inline int _os_event_wait(void *h, _os_event *event, u32 m_sec)
{
unsigned long expire;
if (m_sec) {
expire = msecs_to_jiffies(m_sec);
if (expire > MAX_SCHEDULE_TIMEOUT)
expire = MAX_SCHEDULE_TIMEOUT;
}
else {
expire = MAX_SCHEDULE_TIMEOUT;
}
expire = wait_for_completion_timeout(event, expire);
if (expire == 0)
return 0; /* timeout */
return jiffies_to_msecs(expire); /* success */
}
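/*
 * Illustrative sketch (not part of the driver): typical producer/consumer use
 * of the event helpers above. Function names and the 2000 ms timeout are
 * hypothetical.
 */
#if 0
static inline int wait_cmd_done_example(void *h, _os_event *evt)
{
	/* a zero return from _os_event_wait() means the 2000 ms timeout hit */
	if (_os_event_wait(h, evt, 2000) == 0)
		return -1;
	return 0;
}

static inline void signal_cmd_done_example(void *h, _os_event *evt)
{
	_os_event_set(h, evt);	/* wakes the waiter above */
}
#endif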
/* spinlock */
static inline void _os_spinlock_init(void *d, _os_lock *plock)
{
_rtw_spinlock_init(plock);
}
static inline void _os_spinlock_free(void *d, _os_lock *plock)
{
_rtw_spinlock_free(plock);
}
static inline void _os_spinlock(void *d, _os_lock *plock,
enum lock_type type, _os_spinlockfg *flags
)
{
if(type == _irq)
{
if(flags==NULL)
RTW_ERR("_os_spinlock_irq: flags=NULL @%s:%u\n",
__FUNCTION__, __LINE__);
_rtw_spinlock_irq(plock, flags);
}
else if(type == _bh)
_rtw_spinlock_bh(plock);
else if(type == _ps)
_rtw_spinlock(plock);
}
static inline void _os_spinunlock(void *d, _os_lock *plock,
enum lock_type type, _os_spinlockfg *flags
)
{
if(type == _irq)
{
if(flags==NULL)
RTW_ERR("_os_spinunlock_irq: flags=NULL @%s:%u\n",
__FUNCTION__, __LINE__);
_rtw_spinunlock_irq(plock, flags);
}
else if(type == _bh)
_rtw_spinunlock_bh(plock);
else if(type == _ps)
_rtw_spinunlock(plock);
}
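/*
 * Illustrative sketch (not part of the driver): the flags argument is only
 * dereferenced for _irq locks; _bh/_ps callers may pass NULL. Function name
 * and the counter are hypothetical.
 */
#if 0
static inline void locked_inc_example(void *d, _os_lock *lock, u32 *counter)
{
	_os_spinlockfg flags;

	_os_spinlock(d, lock, _irq, &flags);	/* disables local IRQs */
	(*counter)++;
	_os_spinunlock(d, lock, _irq, &flags);
}
#endif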
static inline int _os_test_and_clear_bit(int nr, unsigned long *addr)
{
return rtw_test_and_clear_bit(nr, addr);
}
static inline int _os_test_and_set_bit(int nr, unsigned long *addr)
{
return rtw_test_and_set_bit(nr, addr);
}
/* Atomic integer operations */
static inline void _os_atomic_set(void *d, _os_atomic *v, int i)
{
ATOMIC_SET(v, i);
}
static inline int _os_atomic_read(void *d, _os_atomic *v)
{
return ATOMIC_READ(v);
}
static inline void _os_atomic_add(void *d, _os_atomic *v, int i)
{
ATOMIC_ADD(v, i);
}
static inline void _os_atomic_sub(void *d, _os_atomic *v, int i)
{
ATOMIC_SUB(v, i);
}
static inline void _os_atomic_inc(void *d, _os_atomic *v)
{
ATOMIC_INC(v);
}
static inline void _os_atomic_dec(void *d, _os_atomic *v)
{
ATOMIC_DEC(v);
}
static inline int _os_atomic_add_return(void *d, _os_atomic *v, int i)
{
return ATOMIC_ADD_RETURN(v, i);
}
static inline int _os_atomic_sub_return(void *d, _os_atomic *v, int i)
{
return ATOMIC_SUB_RETURN(v, i);
}
static inline int _os_atomic_inc_return(void *d, _os_atomic *v)
{
return ATOMIC_INC_RETURN(v);
}
static inline int _os_atomic_dec_return(void *d, _os_atomic *v)
{
return ATOMIC_DEC_RETURN(v);
}
/*
static inline bool _os_atomic_inc_unless(void *d, _os_atomic *v, int u)
{
return ATOMIC_INC_UNLESS(v, 1, u);
}
*/
static inline u8 _os_tasklet_init(void *drv_priv, _os_tasklet *task,
void (*call_back_func)(void* context), void *context)
{
rtw_tasklet_init(task,
(void(*)(unsigned long))call_back_func,
(unsigned long)task);
return 0;
}
static inline u8 _os_tasklet_deinit(void *drv_priv, _os_tasklet *task)
{
rtw_tasklet_kill(task);
return 0;
}
static inline u8 _os_tasklet_schedule(void *drv_priv, _os_tasklet *task)
{
#if 1
rtw_tasklet_hi_schedule(task);
#else
rtw_tasklet_schedule(task);
#endif
return 0;
}
static __inline u8 _os_thread_init( void *drv_priv, _os_thread *thread,
int (*call_back_func)(void * context),
void *context,
const char namefmt[])
{
thread->thread_handler = rtw_thread_start((int(*)(void*))call_back_func, context, namefmt);
if (thread->thread_handler) {
RST_THREAD_STATUS(thread);
SET_THREAD_STATUS(thread, THREAD_STATUS_STARTED);
return RTW_PHL_STATUS_SUCCESS;
}
return RTW_PHL_STATUS_FAILURE;
}
static __inline u8 _os_thread_deinit(void *drv_priv, _os_thread *thread)
{
if (CHK_THREAD_STATUS(thread, THREAD_STATUS_STARTED)) {
CLR_THREAD_STATUS(thread, THREAD_STATUS_STARTED);
return rtw_thread_stop(thread->thread_handler);
}
return RTW_PHL_STATUS_SUCCESS;
}
static __inline enum rtw_phl_status _os_thread_schedule(void *drv_priv, _os_thread *thread)
{
return RTW_PHL_STATUS_SUCCESS;
}
static inline void _os_thread_stop(void *drv_priv, _os_thread *thread)
{
SET_THREAD_STATUS(thread, THREAD_STATUS_STOPPED);
}
static inline int _os_thread_check_stop(void *drv_priv, _os_thread *thread)
{
return CHK_THREAD_STATUS(thread, THREAD_STATUS_STOPPED);
}
static inline int _os_thread_wait_stop(void *drv_priv, _os_thread *thread)
{
rtw_thread_wait_stop();
return RTW_PHL_STATUS_SUCCESS;
}
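/*
 * Illustrative sketch (not part of the driver): a thread body cooperating with
 * the stop helpers above. _os_thread_stop() only marks the status flag; the
 * thread body typically polls _os_thread_check_stop() and calls
 * _os_thread_wait_stop() before returning so rtw_thread_stop() in
 * _os_thread_deinit() can reap it. The structure and function names are
 * hypothetical.
 */
#if 0
struct thread_ctx_example {
	_os_thread thread;
	_os_sema job_sema;
};

static int thread_body_example(void *context)
{
	struct thread_ctx_example *ctx = (struct thread_ctx_example *)context;

	while (!_os_thread_check_stop(NULL, &ctx->thread)) {
		_os_sema_down(NULL, &ctx->job_sema);	/* wait for work */
		/* ... handle one queued job ... */
	}
	_os_thread_wait_stop(NULL, &ctx->thread);
	return 0;
}
#endif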
#if 0
static inline _os_thread _os_thread_start(int (*threadfn)(void *data),
void *data, const char namefmt[])
{
return rtw_thread_start(threadfn, data, namefmt);
}
static inline bool _os_thread_stop(_os_thread th)
{
return rtw_thread_stop(th);
}
static inline void _os_thread_wait_stop(void)
{
rtw_thread_wait_stop();
}
static inline int _os_thread_should_stop(void)
{
return kthread_should_stop();
}
#endif
#ifdef CONFIG_PHL_CPU_BALANCE
static inline u8 _os_workitem_config_cpu(void *drv_priv, _os_workitem *workitem,
char *work_name, u8 cpu_id)
{
_config_workitem_cpu(workitem, work_name, cpu_id);
return 0;
}
#endif
static inline u8 _os_workitem_init(void *drv_priv, _os_workitem *workitem,
void (*call_back_func)(void* context), void *context)
{
#ifdef CONFIG_PHL_CPU_BALANCE
_init_workitem_cpu(workitem, call_back_func, context);
#else
_init_workitem(workitem, call_back_func, context);
#endif
return 0;
}
static inline u8 _os_workitem_schedule(void *drv_priv, _os_workitem *workitem)
{
#ifdef CONFIG_PHL_CPU_BALANCE
_set_workitem_cpu(workitem);
#else
_set_workitem(workitem);
#endif
return 0;
}
static inline u8 _os_workitem_deinit(void *drv_priv, _os_workitem *workitem)
{
#ifdef CONFIG_PHL_CPU_BALANCE
_cancel_workitem_sync_cpu(workitem);
#else
_cancel_workitem_sync(workitem);
#endif
return 0;
}
/* File Operation */
static inline u32 _os_read_file(const char *path, u8 *buf, u32 sz)
{
return (u32)rtw_retrieve_from_file(path, buf, sz);
}
/*BUS*/
#ifdef CONFIG_PCI_HCI
#include <pci_ops_linux.h>
static inline u8 _os_read8_pcie(void *d, u32 addr)
{
return os_pci_read8((struct dvobj_priv *)d, addr);
}
static inline u16 _os_read16_pcie(void *d, u32 addr)
{
return os_pci_read16((struct dvobj_priv *)d, addr);
}
static inline u32 _os_read32_pcie(void *d, u32 addr)
{
return os_pci_read32((struct dvobj_priv *)d, addr);
}
static inline int _os_write8_pcie(void *d, u32 addr, u8 val)
{
return os_pci_write8((struct dvobj_priv *)d, addr, val);
}
static inline int _os_write16_pcie(void *d, u32 addr, u16 val)
{
return os_pci_write16((struct dvobj_priv *)d, addr, val);
}
static inline int _os_write32_pcie(void *d, u32 addr, u32 val)
{
return os_pci_write32((struct dvobj_priv *)d, addr, val);
}
#endif/*#ifdef CONFIG_PCI_HCI*/
#ifdef CONFIG_USB_HCI
#include <usb_ops_linux.h>
static inline int _os_usbctrl_vendorreq(void *d, u8 request, u16 value,
u16 index, void *pdata, u16 len, u8 requesttype)
{
return usbctrl_vendorreq((struct dvobj_priv *)d, request, value,
index, pdata, len, requesttype);
}
static __inline u8 os_out_token_alloc(void *drv_priv)
{
return 0; // RTW_PHL_STATUS_SUCCESS
}
static __inline void os_out_token_free(void *drv_priv)
{
}
static inline int os_usb_tx(void *d, u8 *tx_buf_ptr,
u8 bulk_id, u32 len, u8 *pkt_data_buf)
{
return rtw_usb_write_port((struct dvobj_priv *)d, tx_buf_ptr,
bulk_id, len, pkt_data_buf);
}
static __inline void os_enable_usb_out_pipes(void *drv_priv)
{
}
static __inline void os_disable_usb_out_pipes(void *drv_priv)
{
/* Free bulkout urb */
rtw_usb_write_port_cancel(drv_priv);
}
static __inline u8 os_in_token_alloc(void *drv_priv)
{
// Allocate in token (pUrb) list
return 0;
}
static __inline void os_in_token_free(void *drv_priv)
{
// free in token memory
/*rtw_usb_read_port_free(drv_priv);*/
}
static __inline u8 os_send_usb_in_token(void *drv_priv, void *rxobj, u8 *inbuf, u32 inbuf_len, u8 pipe_idx, u8 minLen)
{
return rtw_usb_read_port(drv_priv, rxobj, inbuf, inbuf_len, pipe_idx, minLen);
}
static __inline void os_enable_usb_in_pipes(void *drv_priv)
{
}
static __inline void os_disable_usb_in_pipes(void *drv_priv)
{
// Cancel Pending IN IRPs.
rtw_usb_read_port_cancel(drv_priv);
}
#endif /*CONFIG_USB_HCI*/
#ifdef CONFIG_SDIO_HCI
#include <rtw_sdio.h>
#include <sdio_ops_linux.h>
#include <rtw_debug.h>
static inline u8 _os_sdio_cmd52_r8(void *d, u32 offset)
{
u8 val = SDIO_ERR_VAL8;
if (rtw_sdio_read_cmd52((struct dvobj_priv *)d, offset, &val, 1) == _FAIL)
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
return val;
}
static inline u8 _os_sdio_cmd53_r8(void *d, u32 offset)
{
u8 val = SDIO_ERR_VAL8;
if (rtw_sdio_read_cmd53((struct dvobj_priv *)d, offset, &val, 1) == _FAIL)
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
return val;
}
static inline u16 _os_sdio_cmd53_r16(void *d, u32 offset)
{
u16 val = SDIO_ERR_VAL16;
if (rtw_sdio_read_cmd53((struct dvobj_priv *)d, offset, &val, 2) == _FAIL) {
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
goto exit;
}
val = le16_to_cpu(val);
exit:
return val;
}
static inline u32 _os_sdio_cmd53_r32(void *d, u32 offset)
{
u32 val = SDIO_ERR_VAL32;
if (rtw_sdio_read_cmd53((struct dvobj_priv *)d, offset, &val, 4) == _FAIL) {
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
goto exit;
}
val = le32_to_cpu(val);
exit:
return val;
}
static inline u8 _os_sdio_cmd53_rn(void *d, u32 offset, u32 size, u8 *data)
{
	struct dvobj_priv *dv = d;
	struct sdio_data *sdio = dvobj_to_sdio(dv);
	u8 *pbuf = data;
	u8 ret = _SUCCESS;
	u32 sdio_read_size;
	if (!data)
		return _FAIL;
	sdio_read_size = RND4(size);
	sdio_read_size = rtw_sdio_cmd53_align_size(dv, sdio_read_size);
	if (sdio_read_size > sdio->tmpbuf_sz) {
		pbuf = rtw_malloc(sdio_read_size);
		if (!pbuf)
			return _FAIL;
	}
	if (rtw_sdio_read_cmd53(dv, offset, pbuf, sdio_read_size) == _FAIL) {
		RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
		ret = _FAIL;
		goto exit;
	}
	if (pbuf != data)
		_rtw_memcpy(data, pbuf, size);
exit:
	if (pbuf != data)
		rtw_mfree(pbuf, sdio_read_size);
	return ret;
}
static inline u8 _os_sdio_cmd53_r(void *d, u32 offset, u32 size, u8 *data)
{
u8 ret;
ret = rtw_sdio_read_cmd53((struct dvobj_priv *)d, offset, data, size);
if (ret == _FAIL) {
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
return _FAIL;
}
return _SUCCESS;
}
static inline void _os_sdio_cmd52_w8(void *d, u32 offset, u8 val)
{
if (rtw_sdio_write_cmd52((struct dvobj_priv *)d, offset, &val, 1) == _FAIL)
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}
static inline void _os_sdio_cmd53_w8(void *d, u32 offset, u8 val)
{
if (rtw_sdio_write_cmd53((struct dvobj_priv *)d, offset, &val, 1) == _FAIL)
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}
static inline void _os_sdio_cmd53_w16(void *d, u32 offset, u16 val)
{
val = cpu_to_le16(val);
if (rtw_sdio_write_cmd53((struct dvobj_priv *)d, offset, &val, 2) == _FAIL)
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}
static inline void _os_sdio_cmd53_w32(void *d, u32 offset, u32 val)
{
val = cpu_to_le32(val);
if (rtw_sdio_write_cmd53((struct dvobj_priv *)d, offset, &val, 4) == _FAIL)
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}
static inline void _os_sdio_cmd53_wn(void *d, u32 offset, u32 size, u8 *data)
{
struct dvobj_priv *dv = d;
struct sdio_data *sdio = dvobj_to_sdio(dv);
u8 *pbuf = data;
if (size > sdio->tmpbuf_sz) {
pbuf = rtw_malloc(size);
if (!pbuf)
return;
_rtw_memcpy(pbuf, data, size);
}
if (rtw_sdio_write_cmd53(dv, offset, pbuf, size) == _FAIL)
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
if (pbuf != data)
rtw_mfree(pbuf, size);
}
static inline void _os_sdio_cmd53_w(void *d, u32 offset, u32 size, u8 *data)
{
u8 ret;
ret = rtw_sdio_write_cmd53((struct dvobj_priv *)d, offset, data, size);
if (ret == _FAIL)
RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}
static inline u8 _os_sdio_f0_read(void *d, u32 addr, void *buf, size_t len)
{
return rtw_sdio_f0_read((struct dvobj_priv *)d, addr, buf, len);
}
static inline u8 _os_sdio_read_cia_r8(void *d, u32 addr)
{
u8 data = 0;
if (rtw_sdio_f0_read((struct dvobj_priv *)d, addr, &data, 1) == _FAIL)
RTW_ERR("%s: read sdio cia FAIL!\n", __FUNCTION__);
return data;
}
#endif /*CONFIG_SDIO_HCI*/
#endif /*_PLTFM_OPS_LINUX_H_*/
|
2301_81045437/rtl8852be
|
phl/pltfm_ops_linux.h
|
C
|
agpl-3.0
| 23,187
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PLTFM_OPS_MACOS_H_
#define _PLTFM_OPS_MACOS_H_
static __inline char *_os_strsep(char **s, const char *ct)
{
return NULL;
}
static __inline int _os_sscanf(const char *buf, const char *fmt, ...)
{
return 0;
}
static __inline int _os_strcmp(const char *s1, const char *s2)
{
return 0;
}
static __inline int _os_strncmp(const char *s1, const char *s2, size_t n)
{
return 0;
}
static __inline char *_os_strcpy(char *dest, const char *src)
{
return NULL;
}
static inline char *_os_strncpy(char *dest, const char *src, size_t n)
{
return NULL;
}
static __inline char *_os_strchr(const char *s, int c)
{
while (*s != (char)c)
if (*s++ == '\0')
return NULL;
return (char *)s;
}
static __inline int _os_snprintf(char *str, size_t size, const char *format, ...)
{
return 0;
}
static __inline u32 _os_strlen(u8 *buf)
{
return 0;
}
static __inline void _os_delay_ms(void *h, u32 ms)
{
}
static __inline void _os_delay_us(void *h, u32 us)
{
}
static __inline void _os_sleep_ms(void *h, u32 ms)
{
}
static __inline void _os_sleep_us(void *h, u32 us)
{
}
static inline u32 _os_get_cur_time_us(void)
{
return 0;
}
static inline u32 _os_get_cur_time_ms(void)
{
return 0;
}
static inline u64 _os_modular64(u64 x, u64 y)
{
return x % y;
}
static inline u64 _os_division64(u64 x, u64 y)
{
return x / y;
}
#ifdef CONFIG_PCI_HCI
static inline void _os_cache_inv(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
u32 buf_sz, u8 direction)
{
}
static inline void _os_cache_wback(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz, u8 direction)
{
}
/* txbd, rxbd, wd */
static inline void *_os_shmem_alloc(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz,
u8 cache, u8 direction, void **os_rsvd)
{
return NULL;
}
static inline void _os_shmem_free(void *d, u8 *vir_addr, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz,
u8 cache, u8 direction, void *os_rsvd)
{
}
#endif
static inline void *_os_pkt_buf_unmap_rx(void *d, _dma bus_addr_l, _dma bus_addr_h, u32 buf_sz)
{
return NULL;
}
static inline void *_os_pkt_buf_map_rx(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
u32 buf_sz, void *os_priv)
{
return NULL;
}
static inline void *_os_pkt_buf_alloc_rx(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
u32 buf_sz, void **os_priv)
{
return NULL;
}
static inline u8 *_os_pkt_buf_free_rx(void *d, u8 *vir_addr, _dma bus_addr_l,
_dma bus_addr_h, u32 buf_sz, void *os_priv)
{
return NULL;
}
/* phl pre-alloc network layer buffer */
static inline void * _os_alloc_netbuf(void *d, u32 buf_sz, void **os_priv)
{
	return NULL; // not used on this platform
}
/* Free netbuf for error case. (ex. drop rx-reorder packet) */
static inline void _os_free_netbuf(void *d, u8 *vir_addr, u32 buf_sz, void *os_priv)
{
}
static __inline void *_os_mem_alloc(void *h, u32 buf_sz)
{
return NULL;
}
static __inline void _os_mem_free(void *h, void *buf, u32 buf_sz)
{
}
/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static inline void *_os_kmem_alloc(void *h, u32 buf_sz)
{
return NULL;
}
/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static inline void _os_kmem_free(void *h, void *buf, u32 buf_sz)
{
}
static __inline void _os_mem_set(void *h, void *buf, s8 value, u32 size)
{
}
static __inline void _os_mem_cpy(void *h, void *dest, void *src, u32 size)
{
}
static __inline int _os_mem_cmp(void *h, void *ptr1, void *ptr2, u32 size)
{
return 0;
}
static __inline void _os_init_timer(void *h, _os_timer *timer,
void (*call_back_func)(void *context), void *context,
const char *sz_id)
{
}
static __inline void _os_set_timer(void *h, _os_timer *timer, u32 ms_delay)
{
}
static __inline void _os_cancel_timer(void *h, _os_timer *timer)
{
}
static inline void _os_cancel_timer_async(void *d, _os_timer *timer)
{
}
static __inline void _os_release_timer(void *h, _os_timer *timer)
{
}
static __inline void _os_mutex_init(void *h, _os_mutex *mutex)
{
}
static __inline void _os_mutex_deinit(void *h, _os_mutex *mutex)
{
}
static __inline void _os_mutex_lock(void *h, _os_mutex *mutex)
{
}
static __inline void _os_mutex_unlock(void *h, _os_mutex *mutex)
{
}
static inline void _os_sema_init(void *h, _os_sema *sema, int int_cnt)
{
}
static inline void _os_sema_free(void *h, _os_sema *sema)
{
}
static inline void _os_sema_up(void *h, _os_sema *sema)
{
}
static inline u8 _os_sema_down(void *h, _os_sema *sema)
{
return 0; //success
}
static __inline void _os_spinlock_init(void *d, _os_lock *plock)
{
}
static __inline void _os_spinlock_free(void *d, _os_lock *plock)
{
}
static inline void _os_spinlock(void *d, _os_lock *plock,
enum lock_type type, _os_spinlockfg *flags)
{
}
static inline void _os_spinunlock(void *d, _os_lock *plock,
enum lock_type type, _os_spinlockfg *flags)
{
}
/* event */
static __inline void _os_event_init(void *h, _os_event *event)
{
}
static __inline void _os_event_free(void *h, _os_event *event)
{
}
static __inline void _os_event_reset(void *h, _os_event *event)
{
}
static __inline void _os_event_set(void *h, _os_event *event)
{
}
/*
* m_sec
* == 0 : wait for completion
* > 0 : wait for timeout or completion
* return value
* 0:timeout
* otherwise:success
*/
static __inline int _os_event_wait(void *h, _os_event *event, u32 m_sec)
{
return 0;
}
static inline int _os_test_and_clear_bit(int nr, unsigned long *addr)
{
/*UNDO*/
return 0;
}
static inline int _os_test_and_set_bit(int nr, unsigned long *addr)
{
/*UNDO*/
return 1;
}
/* Atomic integer operations */
static __inline void _os_atomic_set(void *d, _os_atomic *v, int i)
{
}
static __inline int _os_atomic_read(void *d, _os_atomic *v)
{
return 0;
}
static __inline void _os_atomic_add(void *d, _os_atomic *v, int i)
{
}
static __inline void _os_atomic_sub(void *d, _os_atomic *v, int i)
{
}
static __inline void _os_atomic_inc(void *d, _os_atomic *v)
{
}
static __inline void _os_atomic_dec(void *d, _os_atomic *v)
{
}
static __inline int _os_atomic_add_return(void *d, _os_atomic *v, int i)
{
return 0;
}
static __inline int _os_atomic_sub_return(void *d, _os_atomic *v, int i)
{
return 0;
}
static __inline int _os_atomic_inc_return(void *d, _os_atomic *v)
{
return 0;
}
static __inline int _os_atomic_dec_return(void *d, _os_atomic *v)
{
return 0;
}
/*
static __inline bool _os_atomic_inc_unless(void *d, _os_atomic *v, int u)
{
return 0;
}
*/
static inline u8 _os_tasklet_init(void *drv_priv, _os_tasklet *task,
void (*call_back_func)(void* context), void *context)
{
return 0;
}
static inline u8 _os_tasklet_deinit(void *drv_priv, _os_tasklet *task)
{
return 0;
}
static inline u8 _os_tasklet_schedule(void *drv_priv, _os_tasklet *task)
{
return 0;
}
static __inline u8 _os_thread_init( void *drv_priv, _os_thread *thread,
int (*call_back_func)(void * context),
void *context,
const char namefmt[])
{
return RTW_PHL_STATUS_FAILURE;
}
static __inline u8 _os_thread_deinit(void *drv_priv, _os_thread *thread)
{
return RTW_PHL_STATUS_FAILURE;
}
static __inline enum rtw_phl_status _os_thread_schedule(void *drv_priv, _os_thread *thread)
{
return RTW_PHL_STATUS_FAILURE;
}
static inline void _os_thread_stop(void *drv_priv, _os_thread *thread)
{
}
static inline int _os_thread_check_stop(void *drv_priv, _os_thread *thread)
{
return 1;
}
static inline int _os_thread_wait_stop(void *drv_priv, _os_thread *thread)
{
return RTW_PHL_STATUS_SUCCESS;
}
#if 0 /* TODO */
static inline _os_thread _os_thread_start(int (*threadfn)(void *data),
void *data, const char namefmt[])
{
return 0;
}
static inline bool _os_thread_stop(_os_thread th)
{
return 0;
}
static inline void _os_thread_wait_stop(void)
{
}
static inline int _os_thread_check_stop(void)
{
return 0;
}
#endif
static inline u8 _os_workitem_init(void *drv_priv, _os_workitem *workitem,
void (*call_back_func)(void* context), void *context)
{
return 0;
}
static inline u8 _os_workitem_schedule(void *drv_priv, _os_workitem *workitem)
{
return 0;
}
static inline u8 _os_workitem_deinit(void *drv_priv, _os_workitem *workitem)
{
return 0;
}
/* File Operation */
static inline u32 _os_read_file(const char *path, u8 *buf, u32 sz)
{
/* OS Dependent API */
return 0;
}
#ifdef CONFIG_PCI_HCI
static __inline u8 _os_read8_pcie(void *h, u32 addr)
{
return 0;
}
static __inline u16 _os_read16_pcie(void *h, u32 addr)
{
return 0;
}
static __inline u32 _os_read32_pcie(void *h, u32 addr)
{
return 0;
}
static __inline u32 _os_write8_pcie(void *h, u32 addr, u8 val)
{
return 0;
}
static __inline u32 _os_write16_pcie(void *h, u32 addr, u16 val)
{
return 0;
}
static __inline u32 _os_write32_pcie(void *h, u32 addr, u32 val)
{
return 0;
}
#endif/*#ifdef CONFIG_PCI_HCI*/
#ifdef CONFIG_USB_HCI
static __inline u32 _os_usbctrl_vendorreq(void *h, u8 request, u16 value,
u16 index, void *pdata, u16 len, u8 requesttype)
{
return 0;
}
static inline int os_usb_tx(void *h, u8 *tx_buf_ptr,
u8 bulk_id, u32 len, u8 *pkt_data_buf)
{
return 1;
}
#endif /*CONFIG_USB_HCI*/
#ifdef CONFIG_SDIO_HCI
static __inline u8 _os_sdio_cmd52_r8(void *d, u32 offset)
{
return 0;
}
static __inline u8 _os_sdio_cmd53_r8(void *d, u32 offset)
{
return 0;
}
static __inline u16 _os_sdio_cmd53_r16(void *d, u32 offset)
{
return 0;
}
static __inline u32 _os_sdio_cmd53_r32(void *d, u32 offset)
{
return 0;
}
static __inline u8 _os_sdio_cmd53_rn(void *d, u32 offset, u32 size, u8 *data)
{
return 0;
}
static __inline u8 _os_sdio_cmd53_r(void *d, u32 offset, u32 size, u8 *data)
{
/* TODO: implement read RX FIFO */
return 0;
}
static __inline void _os_sdio_cmd52_w8(void *d, u32 offset, u8 val)
{
}
static __inline void _os_sdio_cmd53_w8(void *d, u32 offset, u8 val)
{
}
static __inline void _os_sdio_cmd53_w16(void *d, u32 offset, u16 val)
{
}
static __inline void _os_sdio_cmd53_w32(void *d, u32 offset, u32 val)
{
}
static __inline void _os_sdio_cmd53_wn(void *d, u32 offset, u32 size, u8 *data)
{
}
static __inline void _os_sdio_cmd53_w(void *d, u32 offset, u32 size, u8 *data)
{
}
static __inline u8 _os_sdio_f0_read(void *d, u32 addr, void *buf, size_t len)
{
return 0;
}
static __inline u8 _os_sdio_read_cia_r8(void *d, u32 addr)
{
return 0;
}
#endif /*CONFIG_SDIO_HCI*/
/* temp os dependency */
/* can delete if osdep ready */
#endif /*_PLTFM_OPS_MACOS_H_*/
|
2301_81045437/rtl8852be
|
phl/pltfm_ops_macos.h
|
C
|
agpl-3.0
| 10,994
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PLTFM_OPS_NONE_H_
#define _PLTFM_OPS_NONE_H_
#include "phl_types.h"
static char *_os_strpbrk(const char *cs, const char *ct)
{
const char *sc1, *sc2;
for (sc1 = cs; *sc1 != '\0'; ++sc1) {
for (sc2 = ct; *sc2 != '\0'; ++sc2) {
if (*sc1 == *sc2)
return (char *)sc1;
}
}
return NULL;
}
static __inline char *_os_strsep(char **s, const char *ct)
{
char *sbegin = *s;
char *end;
if (sbegin == NULL)
return NULL;
end = _os_strpbrk(sbegin, ct);
if (end)
*end++ = '\0';
*s = end;
return sbegin;
}
static __inline int _os_sscanf(const char *buf, const char *fmt, ...)
{
return 0;
}
static __inline int _os_strcmp(const char *s1, const char *s2)
{
while (*s1 == *s2) {
if (*s1 == '\0')
break;
s1++;
s2++;
}
return *s1 - *s2;
}
static __inline int _os_strncmp(const char *s1, const char *s2, size_t n)
{
if (n == 0)
return 0;
while (*s1 == *s2) {
if (*s1 == '\0')
break;
s1++;
s2++;
n--;
if (n == 0)
return 0;
}
return *s1 - *s2;
}
static __inline char *_os_strcpy(char *dest, const char *src)
{
return NULL;
}
static inline char *_os_strncpy(char *dest, const char *src, size_t n)
{
return NULL;
}
static __inline char *_os_strchr(const char *s, int c)
{
while (*s != (char)c)
if (*s++ == '\0')
return NULL;
return (char *)s;
}
static __inline int _os_snprintf(char *str, size_t size, const char *format, ...)
{
return 0;
}
static __inline int _os_strncat(char *dest, char *src, size_t n)
{
return 0;
}
static __inline u32 _os_strlen(u8 *buf)
{
return 0;
}
static __inline void _os_delay_ms(void *h, u32 ms)
{
}
static __inline void _os_delay_us(void *h, u32 us)
{
}
static __inline void _os_sleep_ms(void *h, u32 ms)
{
}
static __inline void _os_sleep_us(void *h, u32 us)
{
}
static inline u32 _os_get_cur_time_us(void)
{
return 0;
}
static inline u32 _os_get_cur_time_ms(void)
{
return (_os_get_cur_time_us() / 1000);
}
static inline u64 _os_modular64(u64 x, u64 y)
{
return x % y;
}
static inline u64 _os_division64(u64 x, u64 y)
{
	/* Chunked long-division fallback for platforms without a native 64-bit
	 * divide; exact only while the divisor fits in 16 bits, since the
	 * "rem << 16" terms otherwise overflow their 32-bit intermediates. */
	u32 low, low2, high, rem;
	u64 result;
	low = x & 0xFFFFFFFF;
	high = x >> 32;
	rem = high % (u32)y;
	high = high / (u32)y;
	low2 = low >> 16;
	low2 += rem << 16;
	rem = low2 % (u32)y;
	low2 = low2 / (u32)y;
	low = low & 0xFFFF;
	low += rem << 16;
	rem = low % (u32)y;
	low = low / (u32)y;
	result = (u64)low + ((u64)low2 << 16) + ((u64)high << 32);
	return result;
}
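/* Worked example: 0x100000000ULL / 3 == 0x55555555 (1431655765), which the
 * chunked fallback above reproduces for divisors below 16 bits. */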
static inline u64 _os_minus64(u64 x, u64 y)
{
return x - y;
}
static inline u64 _os_add64(u64 x, u64 y)
{
return x + y;
}
static inline u32 _os_div_round_up(u32 x, u32 y)
{
return (x + y - 1) / y;
}
#ifdef CONFIG_PCI_HCI
static inline void _os_cache_inv(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
u32 buf_sz, u8 direction)
{
}
static inline void _os_cache_wback(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz, u8 direction)
{
}
/* txbd, rxbd, wd */
static inline void *_os_shmem_alloc(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz,
u8 cache, u8 direction, void **os_rsvd)
{
return NULL;
}
static inline void _os_shmem_free(void *d, u8 *vir_addr, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz,
u8 cache, u8 direction, void *os_rsvd)
{
}
#endif /*CONFIG_PCI_HCI*/
static inline void *_os_pkt_buf_unmap_rx(void *d, _dma bus_addr_l, _dma bus_addr_h, u32 buf_sz)
{
return NULL;
}
static inline void *_os_pkt_buf_map_rx(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
u32 buf_sz, void *os_priv)
{
return NULL;
}
static inline void *_os_pkt_buf_alloc_rx(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz, void **os_priv)
{
return NULL;
}
static inline u8 *_os_pkt_buf_free_rx(void *d, u8 *vir_addr, _dma bus_addr_l,
_dma bus_addr_h, u32 buf_sz, void *os_priv)
{
return NULL;
}
/* phl pre-alloc network layer buffer */
static inline void * _os_alloc_netbuf(void *d, u32 buf_sz, void **os_priv)
{
	return NULL; // not used on this platform
}
/* Free netbuf for error case. (ex. drop rx-reorder packet) */
static inline void _os_free_netbuf(void *d, u8 *vir_addr, u32 buf_sz, void *os_priv)
{
}
static __inline void *_os_mem_alloc(void *h, u32 buf_sz)
{
return NULL;
}
static __inline void _os_mem_free(void *h, void *buf, u32 buf_sz)
{
}
/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static __inline void *_os_kmem_alloc(void *h, u32 buf_sz)
{
return NULL;
}
/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static __inline void _os_kmem_free(void *h, void *buf, u32 buf_sz)
{
}
static __inline void _os_mem_set(void *h, void *buf, s8 value, u32 size)
{
}
static __inline void _os_mem_cpy(void *h, void *dest, void *src, u32 size)
{
}
static __inline int _os_mem_cmp(void *h, void *ptr1, void *ptr2, u32 size)
{
return 0;
}
static __inline void _os_init_timer(void *h, _os_timer *timer,
void (*call_back_func)(void *context), void *context,
const char *sz_id)
{
}
static __inline void _os_set_timer(void *h, _os_timer *timer, u32 ms_delay)
{
}
static __inline void _os_cancel_timer(void *h, _os_timer *timer)
{
}
static inline void _os_cancel_timer_async(void *d, _os_timer *timer)
{
}
static __inline void _os_release_timer(void *h, _os_timer *timer)
{
}
static __inline void _os_mutex_init(void *h, _os_mutex *mutex)
{
}
static __inline void _os_mutex_deinit(void *h, _os_mutex *mutex)
{
}
static __inline void _os_mutex_lock(void *h, _os_mutex *mutex)
{
}
static __inline void _os_mutex_unlock(void *h, _os_mutex *mutex)
{
}
static inline void _os_sema_init(void *d, _os_sema *sema, int int_cnt)
{
}
static inline void _os_sema_free(void *d, _os_sema *sema)
{
}
static inline void _os_sema_up(void *d, _os_sema *sema)
{
}
static inline u8 _os_sema_down(void *d, _os_sema *sema)
{
return 0; //success
}
/* event */
static __inline void _os_event_init(void *h, _os_event *event)
{
}
static __inline void _os_event_free(void *h, _os_event *event)
{
}
static __inline void _os_event_reset(void *h, _os_event *event)
{
}
static __inline void _os_event_set(void *h, _os_event *event)
{
}
/*
* m_sec
* == 0 : wait for completion
* > 0 : wait for timeout or completion
* return value
* 0:timeout
* otherwise:success
*/
static __inline int _os_event_wait(void *h, _os_event *event, u32 m_sec)
{
return 0;
}
/* spinlock */
static __inline void _os_spinlock_init(void *d, _os_lock *plock)
{
}
static __inline void _os_spinlock_free(void *d, _os_lock *plock)
{
}
static inline void _os_spinlock(void *d, _os_lock *plock,
enum lock_type type, _os_spinlockfg *flags)
{
}
static inline void _os_spinunlock(void *d, _os_lock *plock,
enum lock_type type, _os_spinlockfg *flags)
{
}
static inline int _os_test_and_clear_bit(int nr, unsigned long *addr)
{
/*UNDO*/
return 0;
}
static inline int _os_test_and_set_bit(int nr, unsigned long *addr)
{
/*UNDO*/
return 1;
}
/* Atomic integer operations */
static __inline void _os_atomic_set(void *d, _os_atomic *v, int i)
{
}
static __inline int _os_atomic_read(void *d, _os_atomic *v)
{
return 0;
}
static __inline void _os_atomic_add(void *d, _os_atomic *v, int i)
{
}
static __inline void _os_atomic_sub(void *d, _os_atomic *v, int i)
{
}
static __inline void _os_atomic_inc(void *d, _os_atomic *v)
{
}
static __inline void _os_atomic_dec(void *d, _os_atomic *v)
{
}
static __inline int _os_atomic_add_return(void *d, _os_atomic *v, int i)
{
return 0;
}
static __inline int _os_atomic_sub_return(void *d, _os_atomic *v, int i)
{
return 0;
}
static __inline int _os_atomic_inc_return(void *d, _os_atomic *v)
{
return 0;
}
static __inline int _os_atomic_dec_return(void *d, _os_atomic *v)
{
return 0;
}
/*
static __inline bool _os_atomic_inc_unless(void *d, _os_atomic *v, int u)
{
return 0;
}
*/
static inline enum rtw_phl_status _os_tasklet_init(void *drv_priv, _os_tasklet *task,
void (*call_back_func)(void* context), void *context)
{
return RTW_PHL_STATUS_SUCCESS;
}
static inline enum rtw_phl_status _os_tasklet_deinit(void *drv_priv, _os_tasklet *task)
{
return RTW_PHL_STATUS_SUCCESS;
}
static inline enum rtw_phl_status _os_tasklet_schedule(void *drv_priv, _os_tasklet *task)
{
return RTW_PHL_STATUS_SUCCESS;
}
static __inline u8 _os_thread_init( void *drv_priv, _os_thread *thread,
int (*call_back_func)(void * context),
void *context,
const char namefmt[])
{
return RTW_PHL_STATUS_FAILURE;
}
static __inline u8 _os_thread_deinit(void *drv_priv, _os_thread *thread)
{
return RTW_PHL_STATUS_FAILURE;
}
static __inline enum rtw_phl_status _os_thread_schedule(void *drv_priv, _os_thread *thread)
{
return RTW_PHL_STATUS_FAILURE;
}
static inline void _os_thread_stop(void *drv_priv, _os_thread *thread)
{
}
static __inline int _os_thread_check_stop(void *drv_priv, _os_thread *thread)
{
return 1;
}
static inline int _os_thread_wait_stop(void *drv_priv, _os_thread *thread)
{
return RTW_PHL_STATUS_SUCCESS;
}
#if 0
static inline _os_thread _os_thread_start(int (*threadfn)(void *data),
void *data, const char namefmt[])
{
return 0;
}
static inline bool _os_thread_stop(_os_thread th)
{
return 0;
}
static inline void _os_thread_wait_stop(void)
{
}
static inline int _os_thread_should_stop(void)
{
return 0;
}
#endif
static inline enum rtw_phl_status _os_workitem_init(void *drv_priv, _os_workitem *workitem,
void (*call_back_func)(void* context), void *context)
{
return RTW_PHL_STATUS_SUCCESS;
}
static inline enum rtw_phl_status _os_workitem_schedule(void *drv_priv, _os_workitem *workitem)
{
return RTW_PHL_STATUS_SUCCESS;
}
static inline enum rtw_phl_status _os_workitem_deinit(void *drv_priv, _os_workitem *workitem)
{
return RTW_PHL_STATUS_SUCCESS;
}
/*
* _os_read_file - phl read file api
* @path: path of the file to open and read
* @buf: the address of allocated buffer to store the file content
* @sz: the bytes to read at most
*
* returns bytes read
*/
static inline u32 _os_read_file(const char *path, u8 *buf, u32 sz)
{
/* OS Dependent API */
return 0;
}
#ifdef CONFIG_PCI_HCI
static __inline u8 _os_read8_pcie(void *h, u32 addr)
{
return 0;
}
static __inline u16 _os_read16_pcie(void *h, u32 addr)
{
return 0;
}
static __inline u32 _os_read32_pcie(void *h, u32 addr)
{
return 0;
}
static __inline u32 _os_write8_pcie(void *h, u32 addr, u8 val)
{
return 0;
}
static __inline u32 _os_write16_pcie(void *h, u32 addr, u16 val)
{
return 0;
}
static __inline u32 _os_write32_pcie(void *h, u32 addr, u32 val)
{
return 0;
}
#endif/*#ifdef CONFIG_PCI_HCI*/
#ifdef CONFIG_USB_HCI
static __inline u32 _os_usbctrl_vendorreq(void *h, u8 request, u16 value,
u16 index, void *pdata, u16 len, u8 requesttype)
{
return 0;
}
static inline int os_usb_tx(void *h, u8 *tx_buf_ptr,
u8 bulk_id, u32 len, u8 *pkt_data_buf)
{
return 1;
}
static __inline void os_enable_usb_out_pipes(void *drv_priv)
{
}
static __inline void os_disable_usb_out_pipes(void *drv_priv)
{
/* Free bulkout urb */
}
static __inline u8 os_out_token_alloc(void *drv_priv)
{
return 0; // RTW_PHL_STATUS_SUCCESS
}
static __inline void os_out_token_free(void *drv_priv)
{
}
static __inline u8 os_in_token_alloc(void *drv_priv)
{
// Allocate in token (pUrb) list
return 0;
}
static __inline void os_in_token_free(void *drv_priv)
{
// Free in token (pUrb) list
}
static __inline u8 os_send_usb_in_token(void *drv_priv, void *rxobj, u8 *inbuf, u32 inbuf_len, u8 pipe_idx, u8 minLen)
{
// send rtw_rx_buf to os
return 0;
}
static __inline void os_enable_usb_in_pipes(void *drv_priv)
{
}
static __inline void os_disable_usb_in_pipes(void *drv_priv)
{
}
#endif /*CONFIG_USB_HCI*/
#ifdef CONFIG_SDIO_HCI
static inline u8 _os_sdio_cmd52_r8(void *d, u32 offset)
{
return 0;
}
static inline u8 _os_sdio_cmd53_r8(void *d, u32 offset)
{
return 0;
}
static inline u16 _os_sdio_cmd53_r16(void *d, u32 offset)
{
return 0;
}
static inline u32 _os_sdio_cmd53_r32(void *d, u32 offset)
{
return 0;
}
static inline u8 _os_sdio_cmd53_rn(void *d, u32 offset, u32 size, u8 *data)
{
return 0;
}
static inline u8 _os_sdio_cmd53_r(void *d, u32 offset, u32 size, u8 *data)
{
return 0;
}
static inline void _os_sdio_cmd52_w8(void *d, u32 offset, u8 val)
{
}
static inline void _os_sdio_cmd53_w8(void *d, u32 offset, u8 val)
{
}
static inline void _os_sdio_cmd53_w16(void *d, u32 offset, u16 val)
{
}
static inline void _os_sdio_cmd53_w32(void *d, u32 offset, u32 val)
{
}
static inline void _os_sdio_cmd53_wn(void *d, u32 offset, u32 size, u8 *data)
{
}
static inline void _os_sdio_cmd53_w(void *d, u32 offset, u32 size, u8 *data)
{
}
static inline u8 _os_sdio_f0_read(void *d, u32 addr, void *buf, size_t len)
{
return 0;
}
static inline u8 _os_sdio_read_cia_r8(void *d, u32 addr)
{
return 0;
}
#endif /*CONFIG_SDIO_HCI*/
/*
* Continuous bits starting from least significant bit
* Example:
* BIT_LEN_MASK_32(0) => 0x00000000
* BIT_LEN_MASK_32(1) => 0x00000001
* BIT_LEN_MASK_32(2) => 0x00000003
* BIT_LEN_MASK_32(32) => 0xFFFFFFFF
*/
#define BIT_LEN_MASK_32(__BitLen) ((u32)(0xFFFFFFFF >> (32 - (__BitLen))))
#define BIT_LEN_MASK_16(__BitLen) ((u16)(0xFFFF >> (16 - (__BitLen))))
#define BIT_LEN_MASK_8(__BitLen) ((u8)(0xFF >> (8 - (__BitLen))))
/*
* Continuous bits starting from least significant bit
* Example:
* BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
* BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
*/
#define BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen) ((u32)(BIT_LEN_MASK_32(__BitLen) << (__BitOffset)))
#define BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen) ((u16)(BIT_LEN_MASK_16(__BitLen) << (__BitOffset)))
#define BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen) ((u8)(BIT_LEN_MASK_8(__BitLen) << (__BitOffset)))
/*
* Convert LE data to host byte order
*/
#define EF1Byte (u8)
#define EF2Byte le16_to_cpu
#define EF4Byte le32_to_cpu
/*
* Read LE data from memory to host byte order
*/
#define ReadLE4Byte(_ptr) le32_to_cpu(*((u32 *)(_ptr)))
#define ReadLE2Byte(_ptr) le16_to_cpu(*((u16 *)(_ptr)))
#define ReadLE1Byte(_ptr) (*((u8 *)(_ptr)))
/*
* Read BE data from memory to host byte order
*/
#define ReadBEE4Byte(_ptr) be32_to_cpu(*((u32 *)(_ptr)))
#define ReadBE2Byte(_ptr) be16_to_cpu(*((u16 *)(_ptr)))
#define ReadBE1Byte(_ptr) (*((u8 *)(_ptr)))
/*
* Write host byte order data to memory in LE order
*/
#define WriteLE4Byte(_ptr, _val) ((*((u32 *)(_ptr))) = cpu_to_le32(_val))
#define WriteLE2Byte(_ptr, _val) ((*((u16 *)(_ptr))) = cpu_to_le16(_val))
#define WriteLE1Byte(_ptr, _val) ((*((u8 *)(_ptr))) = ((u8)(_val)))
/*
* Write host byte order data to memory in BE order
*/
#define WriteBE4Byte(_ptr, _val) ((*((u32 *)(_ptr))) = cpu_to_be32(_val))
#define WriteBE2Byte(_ptr, _val) ((*((u16 *)(_ptr))) = cpu_to_be16(_val))
#define WriteBE1Byte(_ptr, _val) ((*((u8 *)(_ptr))) = ((u8)(_val)))
/*
 * Return 4-byte value in host byte ordering from 4-byte pointer in little-endian system.
 */
#define LE_P4BYTE_TO_HOST_4BYTE(__pStart) (le32_to_cpu(*((u32 *)(__pStart))))
#define LE_P2BYTE_TO_HOST_2BYTE(__pStart) (le16_to_cpu(*((u16 *)(__pStart))))
#define LE_P1BYTE_TO_HOST_1BYTE(__pStart) ((*((u8 *)(__pStart))))
/*
* Return 4-byte value in host byte ordering from 4-byte pointer in big-endian system.
*/
#define BE_P4BYTE_TO_HOST_4BYTE(__pStart) (be32_to_cpu(*((u32 *)(__pStart))))
#define BE_P2BYTE_TO_HOST_2BYTE(__pStart) (be16_to_cpu(*((u16 *)(__pStart))))
#define BE_P1BYTE_TO_HOST_1BYTE(__pStart) ((*((u8 *)(__pStart))))
/*
* Translate subfield (continuous bits in little-endian) of 4-byte value in LE byte to
* 4-byte value in host byte ordering.
*/
#define LE_BITS_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
((LE_P4BYTE_TO_HOST_4BYTE(__pStart) >> (__BitOffset)) & BIT_LEN_MASK_32(__BitLen))
#define LE_BITS_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
((LE_P2BYTE_TO_HOST_2BYTE(__pStart) >> (__BitOffset)) & BIT_LEN_MASK_16(__BitLen))
#define LE_BITS_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
((LE_P1BYTE_TO_HOST_1BYTE(__pStart) >> (__BitOffset)) & BIT_LEN_MASK_8(__BitLen))
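/*
 * Worked example: if the four LE bytes at desc decode to the host value
 * 0x12345678, then LE_BITS_TO_4BYTE(desc, 8, 8) == 0x56 and
 * LE_BITS_TO_4BYTE(desc, 16, 4) == 0x4.
 */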
/*
* Translate subfield (continuous bits in big-endian) of 4-byte value in BE byte to
* 4-byte value in host byte ordering.
*/
#define BE_BITS_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
((BE_P4BYTE_TO_HOST_4BYTE(__pStart) >> (__BitOffset)) & BIT_LEN_MASK_32(__BitLen))
#define BE_BITS_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
((BE_P2BYTE_TO_HOST_2BYTE(__pStart) >> (__BitOffset)) & BIT_LEN_MASK_16(__BitLen))
#define BE_BITS_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
((BE_P1BYTE_TO_HOST_1BYTE(__pStart) >> (__BitOffset)) & BIT_LEN_MASK_8(__BitLen))
/*
 * Mask subfield (continuous bits in little-endian) of 4-byte value in LE byte ordering
 * and return the result in 4-byte value in host byte ordering.
 */
#define LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
(LE_P4BYTE_TO_HOST_4BYTE(__pStart) & (~BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen)))
#define LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
(LE_P2BYTE_TO_HOST_2BYTE(__pStart) & (~BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen)))
#define LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
(LE_P1BYTE_TO_HOST_1BYTE(__pStart) & ((u8)(~BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen))))
/*
 * Mask subfield (continuous bits in big-endian) of 4-byte value in BE byte ordering
 * and return the result in 4-byte value in host byte ordering.
 */
#define BE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
(BE_P4BYTE_TO_HOST_4BYTE(__pStart) & (~BIT_OFFSET_LEN_MASK_32(__BitOffset, __BitLen)))
#define BE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
(BE_P2BYTE_TO_HOST_2BYTE(__pStart) & (~BIT_OFFSET_LEN_MASK_16(__BitOffset, __BitLen)))
#define BE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
(BE_P1BYTE_TO_HOST_1BYTE(__pStart) & (~BIT_OFFSET_LEN_MASK_8(__BitOffset, __BitLen)))
/*
* Set subfield of little-endian 4-byte value to specified value.
*/
#define SET_BITS_TO_LE_4BYTE(__pStart, __BitOffset, __BitLen, __Value) \
do { \
u8 __offset = __BitOffset, __len = __BitLen; \
if (__offset == 0 && __len == 32) \
WriteLE4Byte(__pStart, __Value); \
else { \
WriteLE4Byte(__pStart, \
LE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
| \
((((u32)__Value) & BIT_LEN_MASK_32(__BitLen)) << (__BitOffset)) \
); \
} \
} while (0)
#define SET_BITS_TO_LE_2BYTE(__pStart, __BitOffset, __BitLen, __Value) \
do { \
WriteLE2Byte(__pStart, \
LE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
| \
((((u16)__Value) & BIT_LEN_MASK_16(__BitLen)) << (__BitOffset)) \
); \
} while (0)
#define SET_BITS_TO_LE_1BYTE(__pStart, __BitOffset, __BitLen, __Value) \
do { \
u8 __offset = __BitOffset; u8 __len = __BitLen; \
if (__offset == 0 && __len == 8) \
WriteLE1Byte(__pStart, __Value); \
else { \
WriteLE1Byte(__pStart, \
LE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __len) \
| \
((((u8)__Value) & BIT_LEN_MASK_8(__len)) << (__BitOffset)) \
); \
} \
} while (0)
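/*
 * Worked example: starting from a zeroed LE dword, SET_BITS_TO_LE_4BYTE(desc, 16, 2, 0x3)
 * leaves the host value 0x00030000, i.e. exactly BIT_OFFSET_LEN_MASK_32(16, 2).
 */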
/*
* Set subfield of big-endian 4-byte value to specified value.
*/
#define SET_BITS_TO_BE_4BYTE(__pStart, __BitOffset, __BitLen, __Value) \
do { \
if (__BitOffset == 0 && __BitLen == 32) \
WriteBE4Byte(__pStart, __Value); \
else { \
WriteBE4Byte(__pStart, \
BE_BITS_CLEARED_TO_4BYTE(__pStart, __BitOffset, __BitLen) \
| \
((((u32)__Value) & BIT_LEN_MASK_32(__BitLen)) << (__BitOffset)) \
); \
} \
} while (0)
#define SET_BITS_TO_BE_2BYTE(__pStart, __BitOffset, __BitLen, __Value) \
do { \
if (__BitOffset == 0 && __BitLen == 16) \
WriteBE2Byte(__pStart, __Value); \
else { \
WriteBE2Byte(__pStart, \
BE_BITS_CLEARED_TO_2BYTE(__pStart, __BitOffset, __BitLen) \
| \
((((u16)__Value) & BIT_LEN_MASK_16(__BitLen)) << (__BitOffset)) \
); \
} \
} while (0)
#define SET_BITS_TO_BE_1BYTE(__pStart, __BitOffset, __BitLen, __Value) \
do { \
if (__BitOffset == 0 && __BitLen == 8) \
WriteBE1Byte(__pStart, __Value); \
else { \
WriteBE1Byte(__pStart, \
BE_BITS_CLEARED_TO_1BYTE(__pStart, __BitOffset, __BitLen) \
| \
((((u8)__Value) & BIT_LEN_MASK_8(__BitLen)) << (__BitOffset)) \
); \
} \
} while (0)
#endif /*_PLTFM_OPS_NONE_H_*/
|
2301_81045437/rtl8852be
|
phl/pltfm_ops_none.h
|
C
|
agpl-3.0
| 20,744
|
/******************************************************************************
*
* Copyright(c) 2019 - 2020 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _HAL_PLTFM_WINDOWS_H_
#define _HAL_PLTFM_WINDOWS_H_
#include <ntdef.h>
#include <ndis.h>
#include <stdio.h>
#include <stdarg.h>
#include <ntstrsafe.h>
#include "StatusCode.h"
#include "EndianFree.h"
#include "LinkList.h" //for N6C_pltfmdef - _RT_TIMER_HANDLE
#include "N6C_pltfmdef.h" //defined u8 :: XXX_pltfmdef.h must before pltfm_def.h
#include "pltfm_def.h"
#if defined(CONFIG_USB_HCI)
#include <wdftypes.h> //for WDFUSBPIPE
#include "core_util.h"
#include "core_usb.h"
#include "pltfm_usb.h"
#endif
#include "phl_config.h"
#ifdef CONFIG_PHL_WPP
//#include "hal_g6\phy\rf\halrf_wpp.h"
#include "hal_g6\mac\halmac_wpp.h"
#include "hal_g6\phy\bb\halbb_wpp.h"
#include "phl_wpp.h"
#include <rtwlanwpp.h>
#endif
#include "phl_types.h"
#include "phl_util.h"
#include "phl_def.h"
//#include "phl_types.h"
//#include "PlatformDef.h"
#define WriteLE4Byte(_ptr, _val) WriteEF4Byte(_ptr,_val)
#define WriteLE2Byte(_ptr, _val) WriteEF2Byte(_ptr,_val)
#define WriteLE1Byte(_ptr, _val) WriteEF1Byte(_ptr,_val)
#define WriteBE4Byte(_ptr, _val)	WriteEF4Byte(_ptr,H2N4BYTE(_val))
#define WriteBE2Byte(_ptr, _val)	WriteEF2Byte(_ptr,H2N2BYTE(_val))
#define WriteBE1Byte(_ptr, _val)	WriteEF1Byte(_ptr,H2N1BYTE(_val))
static inline char *_os_strpbrk(const char *cs, const char *ct)
{
const char *sc1, *sc2;
for (sc1 = cs; *sc1 != '\0'; ++sc1) {
for (sc2 = ct; *sc2 != '\0'; ++sc2) {
if (*sc1 == *sc2)
return (char *)sc1;
}
}
return NULL;
}
static inline char *_os_strsep(char **s, const char *ct)
{
char *sbegin = *s;
char *end;
if (sbegin == NULL)
return NULL;
end = _os_strpbrk((const char *)sbegin, ct);
if (end)
*end++ = '\0';
*s = end;
return sbegin;
}
static inline int _os_strcmp(const char *s1, const char *s2)
{
return strcmp(s1, s2);
}
static inline int _os_strncmp(const char *s1, const char *s2, size_t n)
{
return strncmp(s1, s2, n);
}
static __inline char *_os_strcpy(char *dest, const char *src)
{
return strcpy(dest, src);
}
static inline char *_os_strncpy(char *dest, const char *src, size_t n)
{
return strncpy(dest, src, n);
}
static __inline char *_os_strchr(const char *s, int c)
{
return strchr(s, c);
}
#define _os_snprintf(s, sz, fmt, ...) _snprintf(s, sz, fmt, ##__VA_ARGS__)
#define _os_vsnprintf(str, size, fmt, args) RtlStringCbVPrintfA(str, size, fmt, args)
#define _os_strncat strncat
static __inline u32 _os_strlen(u8 *buf)
{
return (u32)strlen((const char *)buf);
}
#define _os_sscanf(buf, fmt, ...) sscanf_s(buf, fmt, ##__VA_ARGS__)
/* delay */
static __inline void _os_delay_ms(void * drv_priv, u32 ms)
{
PlatformForceStallExecution(1000 * ms);
}
static __inline void _os_delay_us(void *h, u32 us)
{
PlatformForceStallExecution(us);
}
static __inline void _os_sleep_ms(void *h, u32 ms)
{
PlatformForceStallExecution(1000 * ms);
}
static __inline void _os_sleep_us(void *h, u32 us)
{
PlatformForceStallExecution(us);
}
static inline u32 _os_get_cur_time_us(void)
{
u64 ret;
ret = PlatformGetCurrentTime();
return (u32)ret;
}
static inline u32 _os_get_cur_time_ms(void)
{
u64 ret;
ret = PlatformGetCurrentTime() / 1000;
return (u32)ret;
}
static inline u64 _os_modular64(u64 x, u64 y)
{
return x % y;
}
static inline u64 _os_division64(u64 x, u64 y)
{
return x / y;
}
static inline u64 _os_minus64(u64 x, u64 y)
{
return x - y;
}
static inline u64 _os_add64(u64 x, u64 y)
{
return x + y;
}
static inline u32 _os_div_round_up(u32 x, u32 y)
{
return (x + y - 1) / y;
}
static inline void *_os_pkt_buf_unmap_rx(void *d, _dma bus_addr_l, _dma bus_addr_h, u32 buf_sz)
{
return NULL;
}
static inline void *_os_pkt_buf_map_rx(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
u32 buf_sz, void *os_priv)
{
return NULL;
}
static inline void *_os_pkt_buf_alloc_rx(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz, void **os_priv)
{
struct _SHARED_MEMORY share_mem;
PlatformZeroMemory(&share_mem, sizeof(share_mem));
if (PlatformAllocateSharedMemory(d, &share_mem, buf_sz) != RT_STATUS_SUCCESS) {
share_mem.VirtualAddress = NULL;
*bus_addr_l = 0;
*bus_addr_h = 0;
*os_priv = NULL;
} else {
PlatformZeroMemory(share_mem.VirtualAddress, buf_sz);
#ifdef CONFIG_PCI_HCI
*bus_addr_l = (_dma)share_mem.PhysicalAddressLow;
*bus_addr_h = (_dma)share_mem.PhysicalAddressHigh;
#endif
#if WIFICX_BASED
*os_priv = share_mem.pltfm_rsvd[0];
#endif
}
return (u8 *)share_mem.VirtualAddress;
}
static inline void _os_pkt_buf_free_rx(void *d, u8 *vir_addr, _dma bus_addr_l,
_dma bus_addr_h, u32 buf_sz, void *os_priv)
{
struct _SHARED_MEMORY share_mem;
if (NULL != vir_addr) {
share_mem.VirtualAddress = (pu1Byte)vir_addr;
share_mem.PhysicalAddressLow = (u4Byte)bus_addr_l;
share_mem.PhysicalAddressHigh = (u4Byte)bus_addr_h;
share_mem.Length = (u4Byte)buf_sz;
#if WIFICX_BASED
share_mem.pltfm_rsvd[0] = os_priv;
#endif
PlatformFreeSharedMemory(d, &share_mem);
}
}
/* phl pre-alloc network layer buffer */
static inline void * _os_alloc_netbuf(void *d, u32 buf_sz, void **os_priv)
{
	return NULL; // Windows never does this.
}
/* Free netbuf for error case. (ex. drop rx-reorder packet) */
static inline void _os_free_netbuf(void *d, u8 *vir_addr, u32 buf_sz, void *os_priv)
{
}
#ifdef CONFIG_PCI_HCI
static inline void _os_cache_inv(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
u32 buf_sz, u8 direction)
{
}
static inline void _os_cache_wback(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz, u8 direction)
{
}
/* txbd, rxbd, wd */
static inline void *_os_shmem_alloc(void *d, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz,
u8 cache, u8 direction, void **os_rsvd)
{
struct _SHARED_MEMORY share_mem;
PlatformZeroMemory(&share_mem, sizeof(share_mem));
if (PlatformAllocateSharedMemory(d, &share_mem, buf_sz) != RT_STATUS_SUCCESS) {
share_mem.VirtualAddress = NULL;
*bus_addr_l = 0;
*bus_addr_h = 0;
} else {
PlatformZeroMemory(share_mem.VirtualAddress, buf_sz);
*bus_addr_l = (_dma)share_mem.PhysicalAddressLow;
*bus_addr_h = (_dma)share_mem.PhysicalAddressHigh;
#if WIFICX_BASED
*os_rsvd = share_mem.pltfm_rsvd[0];
#endif
}
return (u8 *)share_mem.VirtualAddress;
}
static inline void _os_shmem_free(void *d, u8 *vir_addr, _dma *bus_addr_l,
_dma *bus_addr_h, u32 buf_sz,
u8 cache, u8 direction, void *os_rsvd)
{
struct _SHARED_MEMORY share_mem;
PlatformZeroMemory(&share_mem, sizeof(share_mem));
if (NULL != vir_addr) {
share_mem.VirtualAddress = (pu1Byte)vir_addr;
share_mem.PhysicalAddressLow = (u4Byte)*bus_addr_l;
share_mem.PhysicalAddressHigh = (u4Byte)*bus_addr_h;
share_mem.Length = (u4Byte)buf_sz;
#if WIFICX_BASED
share_mem.pltfm_rsvd[0] = os_rsvd;
#endif
PlatformFreeSharedMemory(d, &share_mem);
}
}
#endif /*CONFIG_PCI_HCI*/
#define _os_mem_alloc(h, buf_sz) _os_mem_alloc_with_tag(h, GenTag(__func__), buf_sz)
/* memory */
static __inline void *_os_mem_alloc_with_tag(void *h, u32 tag, u32 buf_sz)
{
PVOID ptr = NULL;
if (PlatformAllocateMemoryWithTag(tag, &ptr, buf_sz) != RT_STATUS_SUCCESS)
return NULL;
PlatformZeroMemory(ptr, buf_sz);
return ptr;
}
static __inline void _os_mem_free(void *h, void *buf, u32 buf_sz)
{
if(buf)
PlatformFreeMemory(buf, buf_sz);
}
#define _os_kmem_alloc(h, buf_sz) _os_kmem_alloc_with_tag(h, GenTag(__func__), buf_sz)
/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static __inline void *_os_kmem_alloc_with_tag(void *h, u32 tag, u32 buf_sz)
{
PVOID ptr = NULL;
if (PlatformAllocateMemoryWithTag(tag, &ptr, buf_sz) != RT_STATUS_SUCCESS)
return NULL;
PlatformZeroMemory(ptr, buf_sz);
return ptr;
}
/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static __inline void _os_kmem_free(void *h, void *buf, u32 buf_sz)
{
if(buf)
PlatformFreeMemory(buf, buf_sz);
}
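/*
 * Illustrative sketch (added, not part of the original header): how a caller
 * might pair the two allocators above, assuming a hypothetical descriptor
 * size `desc_sz`. _os_kmem_alloc() is the one to use when the buffer may be
 * handed to a DMA engine; _os_mem_alloc() is enough for host-only state:
 *
 *	u8 *desc = _os_kmem_alloc(h, desc_sz);
 *	if (desc) {
 *		// ... program the DMA engine with desc ...
 *		_os_kmem_free(h, desc, desc_sz);
 *	}
 */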
/*static __inline void *_os_aligment_mem_alloc(void *h, u32 buf_sz)
{
PALIGNED_SHARED_MEMORY pAlignedSharedMemory=NULL;
PlatformAllocateAlignedSharedMemory()
}*/
static __inline void _os_mem_set(void *h, void *buf, s8 value, u32 buf_sz)
{
PlatformFillMemory(buf, buf_sz, value);
}
static __inline void _os_mem_cpy(void *h, void *dest, void *src, u32 buf_sz)
{
PlatformMoveMemory(dest, src, buf_sz);
}
static __inline int _os_mem_cmp(void *h, void *ptr1, void *ptr2, u32 buf_sz)
{
return PlatformCompareMemory(ptr1, ptr2, buf_sz);
}
/* timer */
static __inline void _os_init_timer(void *drv_priv, _os_timer *timer,
void (*call_back_func)(void *context), void *context,
const char *sz_id)
{
PlatformInitializeTimer(drv_priv, timer, (RT_TIMER_CALL_BACK)call_back_func, context, sz_id);
}
static __inline void _os_set_timer(void *drv_priv, _os_timer *timer, u32 ms_delay)
{
PlatformSetTimer(drv_priv, timer, ms_delay);
}
static __inline void _os_cancel_timer(void *drv_priv, _os_timer *timer)
{
PlatformCancelTimer(drv_priv, timer);
}
static inline void _os_cancel_timer_async(void *drv_priv, _os_timer *timer)
{
PlatformCancelTimer(drv_priv, timer);
}
static __inline void _os_release_timer(void *drv_priv, _os_timer *timer)
{
PlatformReleaseTimer(drv_priv, timer);
}
/* mutex */
static __inline void _os_mutex_init(void *h, _os_mutex *mutex)
{
PlatformInitializeMutex(mutex);
}
static __inline void _os_mutex_deinit(void *h, _os_mutex *mutex)
{
PlatformFreeMutex(mutex);
}
static __inline void _os_mutex_lock(void *h, _os_mutex *mutex)
{
PlatformAcquireMutex(mutex);
}
static __inline void _os_mutex_unlock(void *h, _os_mutex *mutex)
{
PlatformReleaseMutex(mutex);
}
/* Semaphore */
static inline void _os_sema_init(void *h, _os_sema *sema, int int_cnt)
{
PlatformInitializeSemaphore(sema, int_cnt);
}
static inline void _os_sema_free(void *h, _os_sema *sema)
{
PlatformFreeSemaphore(sema);
}
static inline void _os_sema_up(void *h, _os_sema *sema)
{
PlatformReleaseSemaphore(sema);
}
static inline u8 _os_sema_down(void *h, _os_sema *sema)
{
if(PlatformAcquireSemaphore(sema)==RT_STATUS_SUCCESS)
return 0; /* RTW_PHL_STATUS_SUCCESS */
else
return 1;
}
/* event */
static __inline void _os_event_init(void *h, _os_event *event)
{
PlatformInitializeEvent(event);
}
static __inline void _os_event_free(void *h, _os_event *event)
{
PlatformFreeEvent(event);
}
static __inline void _os_event_reset(void *h, _os_event *event)
{
PlatformResetEvent(event);
}
static __inline void _os_event_set(void *h, _os_event *event)
{
PlatformSetEvent(event);
}
/*
 * m_sec
 * == 0 : wait for completion (block until the event is set)
 * > 0 : wait for completion or until the timeout expires
 * return value
 * 0 : timeout
 * otherwise : success
 */
static __inline int _os_event_wait(void *h, _os_event *event, u32 m_sec)
{
return PlatformWaitEvent(event, m_sec);
}
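/*
 * Illustrative usage only (added, not in the original source): a minimal
 * sketch of waiting on an event with a 100 ms timeout, assuming a
 * caller-owned `_os_event done_evt` that was prepared with _os_event_init()
 * and is signalled elsewhere via _os_event_set():
 *
 *	if (_os_event_wait(h, &done_evt, 100) == 0)
 *		PHL_WARN("wait timed out\n");
 *	else
 *		PHL_INFO("event signalled within 100 ms\n");
 */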
/* spinlock */
static __inline void _os_spinlock_init(void *d, _os_lock *plock)
{
PLATFORM_INIT_RT_SPINLOCK(*plock);
}
static __inline void _os_spinlock_free(void *d, _os_lock *plock)
{
PLATFORM_FREE_RT_SPINLOCK(*plock);
}
static inline void _os_spinlock(void *d, _os_lock *plock,
enum lock_type type, _os_spinlockfg *flags)
{
PLATFORM_ACQUIRE_RT_SPINLOCK(*plock);
}
static inline void _os_spinunlock(void *d, _os_lock *plock,
enum lock_type type, _os_spinlockfg *flags)
{
PLATFORM_RELEASE_RT_SPINLOCK(*plock);
}
/* tasklet/thread */
static __inline u8 _os_thread_init( void *drv_priv, _os_thread *thread,
int (*call_back_func)(void * context),
void *context,
const char namefmt[])
{
if (PlatformInitializeThread(drv_priv,
thread,
(RT_THREAD_CALL_BACK)call_back_func,
namefmt,
0,
context) == RT_STATUS_SUCCESS) //0: caller will wait for the event indefinitely.
return 0;
else
return 1;
}
static __inline u8 _os_thread_deinit(void *drv_priv, _os_thread *thread)
{
/* Terminate the thread */
PlatformWaitThreadEnd(drv_priv, thread);
PlatformCancelThread(drv_priv, thread);
PlatformReleaseThread(drv_priv, thread);
return 0;
}
static __inline enum rtw_phl_status _os_thread_schedule(void *drv_priv, _os_thread *thread)
{
//PlatformSetEventTrigerThread(drv_priv, thread, PASSIVE_LEVEL, thread->pContext);
PlatformRunThread(drv_priv, thread, PASSIVE_LEVEL);
return RTW_PHL_STATUS_SUCCESS;
}
static inline void _os_thread_stop(void *drv_priv, _os_thread *thread)
{
PlatformSetThreadEnd(drv_priv, thread);
}
static inline int _os_thread_check_stop(void *drv_priv, _os_thread *thread)
{
return PlatformIsThreadEnd(drv_priv, thread);
}
static inline int _os_thread_wait_stop(void *drv_priv, _os_thread *thread)
{
return RTW_PHL_STATUS_SUCCESS;
}
/* Workitem */
static __inline u8 _os_workitem_init(void *drv_priv, _os_workitem *workitem, void (*call_back_func)(void* context), void *context)
{
if (PlatformInitializeWorkItem(drv_priv, workitem, (RT_WORKITEM_CALL_BACK)call_back_func,
context, "phl_workitem") == RT_STATUS_SUCCESS)
{
PlatformStartWorkItem(workitem);
return 0; // RTW_PHL_STATUS_SUCCESS
}
else
return 1;
}
static __inline u8 _os_workitem_deinit(void *drv_priv, _os_workitem *workitem)
{
PlatformStopWorkItem(workitem);
PlatformFreeWorkItem(workitem);
return 0;
}
static __inline u8 _os_workitem_schedule(void *drv_priv, _os_workitem *workitem)
{
if(PlatformScheduleWorkItem(workitem) == TRUE)
return 0; // RTW_PHL_STATUS_SUCCESS
else
return 1;
}
/*
static __inline void _os_workitem_run(void *h, hal_thread *thread)
{
//??
}
static __inline void _os_workitem_kill(void *h, hal_thread *thread)
{
PlatformStopWorkItem(thread);
}
static __inline void _os_workitem_pause(void *h, hal_thread *thread)
{
}
static __inline void _os_workitem_resume(void *h, hal_thread *thread)
{
}
*/
/* tasklet */
static __inline int phl_notify_thread_callback(void *context)
{
struct rtw_phl_handler *handler = (struct rtw_phl_handler *)context;
_os_thread *thread = (_os_thread *) &(handler->os_handler.u.tasklet);
do {
if(_os_sema_down(handler->drv_priv, &(thread->sema)) != RTW_PHL_STATUS_SUCCESS )
break;
if(handler->status & RTW_PHL_HANDLER_STATUS_RELEASED)
break;
handler->callback(thread);
} while (true);
return 0;
}
static __inline u8 _os_tasklet_init(void *drv_priv, _os_tasklet *tasklet, void (*call_back_func)(void *context), void *context)
{
_os_thread *actual_thread = (_os_thread *)tasklet;
struct rtw_phl_handler *handler;
_os_thread_init(drv_priv, actual_thread, phl_notify_thread_callback,
context, "phl_tasklet");
_os_thread_schedule(drv_priv, actual_thread);
handler = (struct rtw_phl_handler *)actual_thread->pContext;
handler->status &= RTW_PHL_HANDLER_STATUS_INITIALIZED;
return 0;
}
static __inline u8 _os_tasklet_deinit(void *drv_priv, _os_tasklet *tasklet)
{
_os_thread *actual_thread = (_os_thread *)tasklet;
struct rtw_phl_handler *handler = (struct rtw_phl_handler *)actual_thread->pContext;
if (handler)
handler->status |= RTW_PHL_HANDLER_STATUS_RELEASED;
_os_sema_up(drv_priv, &(actual_thread->sema));
_os_thread_deinit(drv_priv, actual_thread);
return 0;
}
static __inline enum rtw_phl_status _os_tasklet_schedule(void *drv_priv, _os_tasklet *tasklet)
{
_os_thread *actual_thread = (_os_thread *)tasklet;
_os_sema_up(drv_priv, &(actual_thread->sema));
return RTW_PHL_STATUS_SUCCESS;
}
static __inline int _os_test_and_clear_bit(int nr, unsigned long *addr)
{
/* TODO: not implemented on this platform */
return 0;
}
static __inline int _os_test_and_set_bit(int nr, unsigned long *addr)
{
/* TODO: not implemented on this platform */
return 1;
}
/* Atomic integer operations */
static __inline void _os_atomic_set(void *d, _os_atomic *v, int i)
{
InterlockedExchange(v, i);
}
static __inline int _os_atomic_read(void *d, _os_atomic *v)
{
return *v;
}
static __inline void _os_atomic_add(void *d, _os_atomic *v, int i)
{
InterlockedExchangeAdd(v, i);
}
static __inline void _os_atomic_sub(void *d, _os_atomic *v, int i)
{
InterlockedExchangeAdd(v, -i);
}
static __inline void _os_atomic_inc(void *d, _os_atomic *v)
{
InterlockedIncrement(v);
}
static __inline void _os_atomic_dec(void *d, _os_atomic *v)
{
InterlockedDecrement(v);
}
static __inline int _os_atomic_add_return(void *d, _os_atomic *v, int i)
{
/* InterlockedExchangeAdd returns the original value, so add i for the new one */
return InterlockedExchangeAdd(v, i) + i;
}
static __inline int _os_atomic_sub_return(void *d, _os_atomic *v, int i)
{
return InterlockedExchangeAdd(v, -i) - i;
}
static __inline int _os_atomic_inc_return(void *d, _os_atomic *v)
{
/* InterlockedIncrement/Decrement return the resulting value */
return InterlockedIncrement(v);
}
static __inline int _os_atomic_dec_return(void *d, _os_atomic *v)
{
return InterlockedDecrement(v);
}
/* File Operation */
static inline u32 _os_read_file(const char *path, u8 *buf, u32 sz)
{
/* OS Dependent API */
return platform_read_file(path, buf, sz);
}
/*
static __inline bool _os_atomic_inc_unless(void *d, _os_atomic *v, int u)
{
return 0;
}
*/
#ifdef CONFIG_PCI_HCI
static __inline u8 _os_read8_pcie(void *drv_priv, u32 addr)
{
return PlatformEFIORead1Byte(drv_priv, addr);
}
static __inline u16 _os_read16_pcie(void *drv_priv, u32 addr)
{
return PlatformEFIORead2Byte(drv_priv, addr);
}
static __inline u32 _os_read32_pcie(void *drv_priv, u32 addr)
{
return PlatformEFIORead4Byte(drv_priv, addr);
}
static __inline u32 _os_write8_pcie(void *drv_priv, u32 addr, u8 val)
{
PlatformEFIOWrite1Byte(drv_priv, addr, val);
return 0;
}
static __inline u32 _os_write16_pcie(void *drv_priv, u32 addr, u16 val)
{
PlatformEFIOWrite2Byte(drv_priv, addr, val);
return 0;
}
static __inline u32 _os_write32_pcie(void *drv_priv, u32 addr, u32 val)
{
PlatformEFIOWrite4Byte(drv_priv, addr, val);
return 0;
}
#endif/*#ifdef CONFIG_PCI_HCI*/
#ifdef CONFIG_USB_HCI
static __inline u32 _os_usbctrl_vendorreq(void *drv_priv, u8 request, u16 value,
u16 index, void *pdata, u16 len, u8 requesttype)
{
// return value ?? RTW_PHL_STATUS or boolean??
return pltfm_usb_ctrl_vendor_request(
drv_priv,
request,
value,
index,
pdata,
len,
requesttype);
}
static inline int os_usb_tx(void *h, u8 *tx_buf_ptr,
u8 bulk_id, u32 len, u8 *pkt_data_buf)
{
if(pltfm_usb_out_token_send(h, tx_buf_ptr, pkt_data_buf, len, bulk_id) == TRUE)
return 0; // RTW_PHL_STATUS_SUCCESS
else
return 1;
}
static __inline void os_enable_usb_out_pipes(void *drv_priv)
{
pltfm_usb_out_pipes_start(drv_priv);
}
static __inline void os_disable_usb_out_pipes(void *drv_priv)
{
pltfm_usb_out_pipes_stop(drv_priv);
}
static __inline u8 os_out_token_alloc(void *drv_priv)
{
if(core_usb_out_token_init(drv_priv)== TRUE)
return 0; // RTW_PHL_STATUS_SUCCESS
else
return 1;
}
static __inline void os_out_token_free(void *drv_priv)
{
core_usb_out_token_deinit(drv_priv);
}
static __inline u8 os_in_token_alloc(void *drv_priv)
{
if(core_usb_in_token_init(drv_priv)== TRUE)
return 0; // RTW_PHL_STATUS_SUCCESS
else
return 1;
}
static __inline void os_in_token_free(void *drv_priv)
{
core_usb_in_token_deinit(drv_priv);
}
static __inline u8 os_send_usb_in_token(void *drv_priv, void *rxobj, u8 *inbuf, u32 inbuf_len, u8 pipe_idx, u8 minLen)
{
if(pltfm_usb_in_token_send(drv_priv, rxobj, inbuf, inbuf_len, pipe_idx, minLen) == TRUE)
return 0;// RTW_PHL_STATUS_SUCCESS
else
return 1;
}
static __inline void os_enable_usb_in_pipes(void *drv_priv)
{
pltfm_usb_in_pipes_start(drv_priv);
}
static __inline void os_disable_usb_in_pipes(void *drv_priv)
{
pltfm_usb_in_pipes_stop(drv_priv);
}
#endif /*CONFIG_USB_HCI*/
#ifdef CONFIG_SDIO_HCI
static __inline u8 _os_sdio_cmd52_r8(void *d, u32 offset)
{
return PlatformEFSdioLocalCmd52Read1Byte(d, offset);
}
static __inline u8 _os_sdio_cmd53_r8(void *d, u32 offset)
{
return PlatformEFSdioLocalCmd53Read1Byte(d, offset);
}
static __inline u16 _os_sdio_cmd53_r16(void *d, u32 offset)
{
return PlatformEFSdioLocalCmd53Read2Byte(d, offset);
}
static __inline u32 _os_sdio_cmd53_r32(void *d, u32 offset)
{
return PlatformEFSdioLocalCmd53Read4Byte(d, offset);
}
static __inline u8 _os_sdio_cmd53_rn(void *d, u32 offset, u32 size, u8 *data)
{
if (!data){
return _FAIL;
}
PlatformEFSdioLocalCmd53ReadNByte(d, offset, size, (pu1Byte)data);
return _SUCCESS;
}
static __inline u8 _os_sdio_cmd53_r(void *d, u32 offset, u32 size, u8 *data)
{
PlatformEFSdioLocalCmd53ReadNByte(d, offset, size, data);
return _SUCCESS;
}
static __inline void _os_sdio_cmd52_w8(void *d, u32 offset, u8 val)
{
PlatformEFSdioLocalCmd52Write1Byte(d, offset, val);
}
static __inline void _os_sdio_cmd53_w8(void *d, u32 offset, u8 val)
{
PlatformEFSdioLocalCmd53Write1Byte(d, offset, val);
}
static __inline void _os_sdio_cmd53_w16(void *d, u32 offset, u16 val)
{
PlatformEFSdioLocalCmd53Write2Byte(d, offset, val);
}
static __inline void _os_sdio_cmd53_w32(void *d, u32 offset, u32 val)
{
PlatformEFSdioLocalCmd53Write4Byte(d, offset, val);
}
static __inline void _os_sdio_cmd53_wn(void *d, u32 offset, u32 size, u8 *data)
{
PlatformEFSdioLocalCmd53WriteNByte(d, offset, size, (pu1Byte)data);
}
static __inline void _os_sdio_cmd53_w(void *d, u32 offset, u32 size, u8 *data)
{
PlatformEFSdioLocalCmd53WriteNByte(d, offset, size, (pu1Byte)data);
}
static __inline u8 _os_sdio_f0_read(void *d, u32 addr, void *buf, size_t len)
{
return 0;
}
static __inline u8 _os_sdio_read_cia_r8(void *d, u32 addr)
{
return 0;
}
#endif /*CONFIG_SDIO_HCI*/
#endif /*_HAL_PLTFM_WINDOWS_H_*/
| 2301_81045437/rtl8852be | phl/pltfm_ops_windows.h | C | agpl-3.0 | 21,590 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _RTW_GENERAL_DEF_H_
#define _RTW_GENERAL_DEF_H_
#define SEC_CAP_CHK_BMC BIT0
#define BIT0 0x00000001
#define BIT1 0x00000002
#define BIT2 0x00000004
#define BIT3 0x00000008
#define BIT4 0x00000010
#define BIT5 0x00000020
#define BIT6 0x00000040
#define BIT7 0x00000080
#define BIT8 0x00000100
#define BIT9 0x00000200
#define BIT10 0x00000400
#define BIT11 0x00000800
#define BIT12 0x00001000
#define BIT13 0x00002000
#define BIT14 0x00004000
#define BIT15 0x00008000
#define BIT16 0x00010000
#define BIT17 0x00020000
#define BIT18 0x00040000
#define BIT19 0x00080000
#define BIT20 0x00100000
#define BIT21 0x00200000
#define BIT22 0x00400000
#define BIT23 0x00800000
#define BIT24 0x01000000
#define BIT25 0x02000000
#define BIT26 0x04000000
#define BIT27 0x08000000
#define BIT28 0x10000000
#define BIT29 0x20000000
#define BIT30 0x40000000
#define BIT31 0x80000000
#define RTW_U32_MAX 0xFFFFFFFF
enum rtl_ic_id {
RTL8852A,
RTL8834A,
RTL8852B,
RTL8852C,
MAX_IC_ID
};
/* BIT definition for combination */
enum rtw_hci_type {
RTW_HCI_PCIE = BIT0,
RTW_HCI_USB = BIT1,
RTW_HCI_SDIO = BIT2,
RTW_HCI_GSPI = BIT3,
RTW_HCI_MAX,
};
#define SM_PS_STATIC 0
#define SM_PS_DYNAMIC 1
#define SM_PS_INVALID 2
#define SM_PS_DISABLE 3
#define MAC_ADDRESS_LENGTH 6
#define IPV4_ADDRESS_LENGTH 4
#define IPV6_ADDRESS_LENGTH 16
/* Core shall translate system condition into device state for PHL controller */
enum rtw_dev_state {
RTW_DEV_WORKING = BIT0,
RTW_DEV_SUSPENDING = BIT1,
RTW_DEV_RESUMING = BIT2,
RTW_DEV_SURPRISE_REMOVAL = BIT3,
RTW_DEV_MAX
};
enum rtw_rate_mode {
RTW_LEGACY_MODE = 0,
RTW_HT_MODE = 1,
RTW_VHT_MODE = 2,
RTW_HE_MODE = 3
};
enum rtw_data_rate {
RTW_DATA_RATE_CCK1 = 0x0,
RTW_DATA_RATE_CCK2 = 0x1,
RTW_DATA_RATE_CCK5_5 = 0x2,
RTW_DATA_RATE_CCK11 = 0x3,
RTW_DATA_RATE_OFDM6 = 0x4,
RTW_DATA_RATE_OFDM9 = 0x5,
RTW_DATA_RATE_OFDM12 = 0x6,
RTW_DATA_RATE_OFDM18 = 0x7,
RTW_DATA_RATE_OFDM24 = 0x8,
RTW_DATA_RATE_OFDM36 = 0x9,
RTW_DATA_RATE_OFDM48 = 0xA,
RTW_DATA_RATE_OFDM54 = 0xB,
RTW_DATA_RATE_MCS0 = 0x80,
RTW_DATA_RATE_MCS1 = 0x81,
RTW_DATA_RATE_MCS2 = 0x82,
RTW_DATA_RATE_MCS3 = 0x83,
RTW_DATA_RATE_MCS4 = 0x84,
RTW_DATA_RATE_MCS5 = 0x85,
RTW_DATA_RATE_MCS6 = 0x86,
RTW_DATA_RATE_MCS7 = 0x87,
RTW_DATA_RATE_MCS8 = 0x88,
RTW_DATA_RATE_MCS9 = 0x89,
RTW_DATA_RATE_MCS10 = 0x8A,
RTW_DATA_RATE_MCS11 = 0x8B,
RTW_DATA_RATE_MCS12 = 0x8C,
RTW_DATA_RATE_MCS13 = 0x8D,
RTW_DATA_RATE_MCS14 = 0x8E,
RTW_DATA_RATE_MCS15 = 0x8F,
RTW_DATA_RATE_MCS16 = 0x90,
RTW_DATA_RATE_MCS17 = 0x91,
RTW_DATA_RATE_MCS18 = 0x92,
RTW_DATA_RATE_MCS19 = 0x93,
RTW_DATA_RATE_MCS20 = 0x94,
RTW_DATA_RATE_MCS21 = 0x95,
RTW_DATA_RATE_MCS22 = 0x96,
RTW_DATA_RATE_MCS23 = 0x97,
RTW_DATA_RATE_MCS24 = 0x98,
RTW_DATA_RATE_MCS25 = 0x99,
RTW_DATA_RATE_MCS26 = 0x9A,
RTW_DATA_RATE_MCS27 = 0x9B,
RTW_DATA_RATE_MCS28 = 0x9C,
RTW_DATA_RATE_MCS29 = 0x9D,
RTW_DATA_RATE_MCS30 = 0x9E,
RTW_DATA_RATE_MCS31 = 0x9F,
RTW_DATA_RATE_VHT_NSS1_MCS0 = 0x100,
RTW_DATA_RATE_VHT_NSS1_MCS1 = 0x101,
RTW_DATA_RATE_VHT_NSS1_MCS2 = 0x102,
RTW_DATA_RATE_VHT_NSS1_MCS3 = 0x103,
RTW_DATA_RATE_VHT_NSS1_MCS4 = 0x104,
RTW_DATA_RATE_VHT_NSS1_MCS5 = 0x105,
RTW_DATA_RATE_VHT_NSS1_MCS6 = 0x106,
RTW_DATA_RATE_VHT_NSS1_MCS7 = 0x107,
RTW_DATA_RATE_VHT_NSS1_MCS8 = 0x108,
RTW_DATA_RATE_VHT_NSS1_MCS9 = 0x109,
RTW_DATA_RATE_VHT_NSS2_MCS0 = 0x110,
RTW_DATA_RATE_VHT_NSS2_MCS1 = 0x111,
RTW_DATA_RATE_VHT_NSS2_MCS2 = 0x112,
RTW_DATA_RATE_VHT_NSS2_MCS3 = 0x113,
RTW_DATA_RATE_VHT_NSS2_MCS4 = 0x114,
RTW_DATA_RATE_VHT_NSS2_MCS5 = 0x115,
RTW_DATA_RATE_VHT_NSS2_MCS6 = 0x116,
RTW_DATA_RATE_VHT_NSS2_MCS7 = 0x117,
RTW_DATA_RATE_VHT_NSS2_MCS8 = 0x118,
RTW_DATA_RATE_VHT_NSS2_MCS9 = 0x119,
RTW_DATA_RATE_VHT_NSS3_MCS0 = 0x120,
RTW_DATA_RATE_VHT_NSS3_MCS1 = 0x121,
RTW_DATA_RATE_VHT_NSS3_MCS2 = 0x122,
RTW_DATA_RATE_VHT_NSS3_MCS3 = 0x123,
RTW_DATA_RATE_VHT_NSS3_MCS4 = 0x124,
RTW_DATA_RATE_VHT_NSS3_MCS5 = 0x125,
RTW_DATA_RATE_VHT_NSS3_MCS6 = 0x126,
RTW_DATA_RATE_VHT_NSS3_MCS7 = 0x127,
RTW_DATA_RATE_VHT_NSS3_MCS8 = 0x128,
RTW_DATA_RATE_VHT_NSS3_MCS9 = 0x129,
RTW_DATA_RATE_VHT_NSS4_MCS0 = 0x130,
RTW_DATA_RATE_VHT_NSS4_MCS1 = 0x131,
RTW_DATA_RATE_VHT_NSS4_MCS2 = 0x132,
RTW_DATA_RATE_VHT_NSS4_MCS3 = 0x133,
RTW_DATA_RATE_VHT_NSS4_MCS4 = 0x134,
RTW_DATA_RATE_VHT_NSS4_MCS5 = 0x135,
RTW_DATA_RATE_VHT_NSS4_MCS6 = 0x136,
RTW_DATA_RATE_VHT_NSS4_MCS7 = 0x137,
RTW_DATA_RATE_VHT_NSS4_MCS8 = 0x138,
RTW_DATA_RATE_VHT_NSS4_MCS9 = 0x139,
RTW_DATA_RATE_HE_NSS1_MCS0 = 0x180,
RTW_DATA_RATE_HE_NSS1_MCS1 = 0x181,
RTW_DATA_RATE_HE_NSS1_MCS2 = 0x182,
RTW_DATA_RATE_HE_NSS1_MCS3 = 0x183,
RTW_DATA_RATE_HE_NSS1_MCS4 = 0x184,
RTW_DATA_RATE_HE_NSS1_MCS5 = 0x185,
RTW_DATA_RATE_HE_NSS1_MCS6 = 0x186,
RTW_DATA_RATE_HE_NSS1_MCS7 = 0x187,
RTW_DATA_RATE_HE_NSS1_MCS8 = 0x188,
RTW_DATA_RATE_HE_NSS1_MCS9 = 0x189,
RTW_DATA_RATE_HE_NSS1_MCS10 = 0x18A,
RTW_DATA_RATE_HE_NSS1_MCS11 = 0x18B,
RTW_DATA_RATE_HE_NSS2_MCS0 = 0x190,
RTW_DATA_RATE_HE_NSS2_MCS1 = 0x191,
RTW_DATA_RATE_HE_NSS2_MCS2 = 0x192,
RTW_DATA_RATE_HE_NSS2_MCS3 = 0x193,
RTW_DATA_RATE_HE_NSS2_MCS4 = 0x194,
RTW_DATA_RATE_HE_NSS2_MCS5 = 0x195,
RTW_DATA_RATE_HE_NSS2_MCS6 = 0x196,
RTW_DATA_RATE_HE_NSS2_MCS7 = 0x197,
RTW_DATA_RATE_HE_NSS2_MCS8 = 0x198,
RTW_DATA_RATE_HE_NSS2_MCS9 = 0x199,
RTW_DATA_RATE_HE_NSS2_MCS10 = 0x19A,
RTW_DATA_RATE_HE_NSS2_MCS11 = 0x19B,
RTW_DATA_RATE_HE_NSS3_MCS0 = 0x1A0,
RTW_DATA_RATE_HE_NSS3_MCS1 = 0x1A1,
RTW_DATA_RATE_HE_NSS3_MCS2 = 0x1A2,
RTW_DATA_RATE_HE_NSS3_MCS3 = 0x1A3,
RTW_DATA_RATE_HE_NSS3_MCS4 = 0x1A4,
RTW_DATA_RATE_HE_NSS3_MCS5 = 0x1A5,
RTW_DATA_RATE_HE_NSS3_MCS6 = 0x1A6,
RTW_DATA_RATE_HE_NSS3_MCS7 = 0x1A7,
RTW_DATA_RATE_HE_NSS3_MCS8 = 0x1A8,
RTW_DATA_RATE_HE_NSS3_MCS9 = 0x1A9,
RTW_DATA_RATE_HE_NSS3_MCS10 = 0x1AA,
RTW_DATA_RATE_HE_NSS3_MCS11 = 0x1AB,
RTW_DATA_RATE_HE_NSS4_MCS0 = 0x1B0,
RTW_DATA_RATE_HE_NSS4_MCS1 = 0x1B1,
RTW_DATA_RATE_HE_NSS4_MCS2 = 0x1B2,
RTW_DATA_RATE_HE_NSS4_MCS3 = 0x1B3,
RTW_DATA_RATE_HE_NSS4_MCS4 = 0x1B4,
RTW_DATA_RATE_HE_NSS4_MCS5 = 0x1B5,
RTW_DATA_RATE_HE_NSS4_MCS6 = 0x1B6,
RTW_DATA_RATE_HE_NSS4_MCS7 = 0x1B7,
RTW_DATA_RATE_HE_NSS4_MCS8 = 0x1B8,
RTW_DATA_RATE_HE_NSS4_MCS9 = 0x1B9,
RTW_DATA_RATE_HE_NSS4_MCS10 = 0x1BA,
RTW_DATA_RATE_HE_NSS4_MCS11 = 0x1BB,
RTW_DATA_RATE_MAX = 0x1FF
};
enum rtw_gi_ltf {
RTW_GILTF_LGI_4XHE32 = 0,
RTW_GILTF_SGI_4XHE08 = 1,
RTW_GILTF_2XHE16 = 2,
RTW_GILTF_2XHE08 = 3,
RTW_GILTF_1XHE16 = 4,
RTW_GILTF_1XHE08 = 5,
RTW_GILTF_MAX
};
/* 11ax spec define for HE Trigger Frame, only used for HE Trigger Frame! */
enum rtw_gi_ltf_ul_tb {
RTW_TB_GILTF_1XHE16 = 0,
RTW_TB_GILTF_2XHE16 = 1,
RTW_TB_GILTF_4XHE32 = 2,
RTW_TB_GILTF_MAX
};
#define RTW_PHL_MAX_RF_PATH 4
enum rf_path {
RF_PATH_A = 0,
RF_PATH_B = 1,
RF_PATH_C = 2,
RF_PATH_D = 3,
RF_PATH_AB,
RF_PATH_AC,
RF_PATH_AD,
RF_PATH_BC,
RF_PATH_BD,
RF_PATH_CD,
RF_PATH_ABC,
RF_PATH_ABD,
RF_PATH_ACD,
RF_PATH_BCD,
RF_PATH_ABCD,
};
/*HW SPEC & SW/HW CAP*/
#define PROTO_CAP_11B BIT0
#define PROTO_CAP_11G BIT1
#define PROTO_CAP_11N BIT2
#define PROTO_CAP_11AC BIT3
#define PROTO_CAP_11AX BIT4
#define PROTO_CAP_BIT_NUM 4
enum wlan_mode {
WLAN_MD_INVALID = 0,
WLAN_MD_11B = BIT0,
WLAN_MD_11A = BIT1,
WLAN_MD_11G = BIT2,
WLAN_MD_11N = BIT3,
WLAN_MD_11AC = BIT4,
WLAN_MD_11AX = BIT5,
/* Type for current wireless mode */
WLAN_MD_11BG = (WLAN_MD_11B | WLAN_MD_11G),
WLAN_MD_11GN = (WLAN_MD_11G | WLAN_MD_11N),
WLAN_MD_11AN = (WLAN_MD_11A | WLAN_MD_11N),
WLAN_MD_11BN = (WLAN_MD_11B | WLAN_MD_11N),
WLAN_MD_11BGN = (WLAN_MD_11B | WLAN_MD_11G | WLAN_MD_11N),
WLAN_MD_11BGAC = (WLAN_MD_11B | WLAN_MD_11G | WLAN_MD_11AC),
WLAN_MD_11BGAX = (WLAN_MD_11B | WLAN_MD_11G | WLAN_MD_11AX),
WLAN_MD_11GAC = (WLAN_MD_11G | WLAN_MD_11AC),
WLAN_MD_11GAX = (WLAN_MD_11G | WLAN_MD_11AX),
WLAN_MD_11A_AC = (WLAN_MD_11A | WLAN_MD_11AC),
WLAN_MD_11A_AX = (WLAN_MD_11A | WLAN_MD_11AX),
/* Capability -Type for registry default wireless mode */
WLAN_MD_11AGN = (WLAN_MD_11A | WLAN_MD_11G | WLAN_MD_11N ),
WLAN_MD_11ABGN = (WLAN_MD_11A | WLAN_MD_11B | WLAN_MD_11G | WLAN_MD_11N ),
WLAN_MD_11ANAC = (WLAN_MD_11A | WLAN_MD_11N | WLAN_MD_11AC),
WLAN_MD_11BGNAC = (WLAN_MD_11B | WLAN_MD_11G | WLAN_MD_11N | WLAN_MD_11AC),
WLAN_MD_11GNAC = (WLAN_MD_11G | WLAN_MD_11N | WLAN_MD_11AC),
WLAN_MD_24G_MIX = (WLAN_MD_11B | WLAN_MD_11G | WLAN_MD_11N | WLAN_MD_11AC | WLAN_MD_11AX),
WLAN_MD_5G_MIX = (WLAN_MD_11A | WLAN_MD_11N | WLAN_MD_11AC | WLAN_MD_11AX),
WLAN_MD_MAX = (WLAN_MD_24G_MIX|WLAN_MD_5G_MIX),
};
enum band_type {
BAND_ON_24G = 0,
BAND_ON_5G = 1,
BAND_ON_6G = 2,
BAND_MAX,
};
/*HW SPEC & SW/HW CAP*/
#define BAND_CAP_2G BIT(BAND_ON_24G)
#define BAND_CAP_5G BIT(BAND_ON_5G)
#define BAND_CAP_6G BIT(BAND_ON_6G)
#define BAND_CAP_BIT_NUM 3
enum channel_width {
CHANNEL_WIDTH_20 = 0,
CHANNEL_WIDTH_40 = 1,
CHANNEL_WIDTH_80 = 2,
CHANNEL_WIDTH_160 = 3,
CHANNEL_WIDTH_80_80 = 4,
CHANNEL_WIDTH_5 = 5,
CHANNEL_WIDTH_10 = 6,
CHANNEL_WIDTH_MAX = 7,
};
/*HW SPEC & SW/HW CAP*/
#define BW_CAP_20M BIT(CHANNEL_WIDTH_20)
#define BW_CAP_40M BIT(CHANNEL_WIDTH_40)
#define BW_CAP_80M BIT(CHANNEL_WIDTH_80)
#define BW_CAP_160M BIT(CHANNEL_WIDTH_160)
#define BW_CAP_80_80M BIT(CHANNEL_WIDTH_80_80)
#define BW_CAP_5M BIT(CHANNEL_WIDTH_5)
#define BW_CAP_10M BIT(CHANNEL_WIDTH_10)
#define BW_CAP_BIT_NUM 7
/*
 * Represents the Extension Channel Offset in HT Capabilities
 * (Secondary Channel Offset)
 * 0 - SCN, 1 - SCA, 2 - RSVD, 3 - SCB
 */
enum chan_offset {
CHAN_OFFSET_NO_EXT = 0, /*SCN - no secondary channel*/
CHAN_OFFSET_UPPER = 1, /*SCA - secondary channel above*/
CHAN_OFFSET_NO_DEF = 2, /*Reserved*/
CHAN_OFFSET_LOWER = 3, /*SCB - secondary channel below*/
};
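/*
 * Worked example (added for illustration, not in the original source): a
 * 40 MHz HT BSS whose secondary 20 MHz channel lies above the primary
 * advertises CHAN_OFFSET_UPPER (SCA), one whose secondary channel lies
 * below advertises CHAN_OFFSET_LOWER (SCB), and a plain 20 MHz BSS uses
 * CHAN_OFFSET_NO_EXT (SCN).
 */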
enum rf_type {
RF_1T1R = 0,
RF_1T2R = 1,
RF_2T2R = 2,
RF_2T3R = 3,
RF_2T4R = 4,
RF_3T3R = 5,
RF_3T4R = 6,
RF_4T4R = 7,
RF_TYPE_MAX,
};
enum rtw_rf_state {
RTW_RF_ON,
RTW_RF_OFF,
RTW_RF_MAX
};
enum rtw_usb_speed {
RTW_USB_SPEED_LOW = 0, /*U2 (2.0)- 1.0 - 1.5 Mbps - 0.192 MB/s*/
RTW_USB_SPEED_FULL = 1, /*U2 (2.0)- 1.1 - 12 Mbps - 1.5 MB/s*/
RTW_USB_SPEED_HIGH = 2, /*U2 (2.0)- 2.1 - 480 Mbps - 60 MB/s*/
RTW_USB_SPEED_SUPER = 3, /*U3 (3.2 Gen 1)- 3.0 - 5 Gbps - 640 MB/s*/
RTW_USB_SPEED_SUPER_10G = 4, /*U3 (3.2 Gen 2)- 3.1 - 10 Gbps - 1280 MB/s*/
RTW_USB_SPEED_SUPER_20G = 5, /*U3 (3.2 Gen 2x2)- 3.2 - 20 Gbps - 2560 MB/s*/
/* keep last */
RTW_USB_SPEED_MAX,
RTW_USB_SPEED_UNKNOWN = RTW_USB_SPEED_MAX,
};
#define USB_SUPER_SPEED_BULK_SIZE 1024 /* usb 3.0 */
#define USB_HIGH_SPEED_BULK_SIZE 512 /* usb 2.0 */
#define USB_FULL_SPEED_BULK_SIZE 64 /* usb 1.1 */
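/*
 * Illustrative mapping (added as a hedged sketch, not in the original
 * source): the bulk max-packet sizes above pair with enum rtw_usb_speed,
 * e.g. RTW_USB_SPEED_SUPER and faster use USB_SUPER_SPEED_BULK_SIZE (1024),
 * RTW_USB_SPEED_HIGH uses USB_HIGH_SPEED_BULK_SIZE (512), and
 * RTW_USB_SPEED_FULL uses USB_FULL_SPEED_BULK_SIZE (64).
 */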
#define IV_LENGTH 8
enum rtw_enc_algo {
RTW_ENC_NONE,
RTW_ENC_WEP40,
RTW_ENC_WEP104,
RTW_ENC_TKIP,
RTW_ENC_WAPI,
RTW_ENC_GCMSMS4,
RTW_ENC_CCMP,
RTW_ENC_CCMP256,
RTW_ENC_GCMP,
RTW_ENC_GCMP256,
RTW_ENC_BIP_CCMP128,
RTW_ENC_MAX
};
enum rtw_sec_ent_mode {
RTW_SEC_ENT_MODE_0, /* No key */
RTW_SEC_ENT_MODE_1, /* WEP */
RTW_SEC_ENT_MODE_2, /* 2 unicast + 3 multicast + 2 BIP keys */
RTW_SEC_ENT_MODE_3, /* 2 unicast + 4 multicast + 1 BIP keys */
};
enum rtw_sec_key_type {
RTW_SEC_KEY_UNICAST,
RTW_SEC_KEY_MULTICAST,
RTW_SEC_KEY_BIP,
RTW_SEC_KEY_MAX
};
/**
* Figure 27-7 + Table 9-31h from Ax Spec D4.2
* B7-B1:
* RU26 : 0 1 2 3 4 5 6 7 8 9 10 ... 36
* RU52 : 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52
* RU106 : 53 54 55 56 57 58 59 60
* RU242 : 61 62 63 64
* RU484 : 65 66
* RU996 : 67
* RU996x2: 68
**/
enum rtw_he_ru_idx {
/* 20MHz - 1 */
RTW_HE_RU26_1 = 0,
RTW_HE_RU26_2,
RTW_HE_RU26_3,
RTW_HE_RU26_4,
RTW_HE_RU26_5,
RTW_HE_RU26_6,
RTW_HE_RU26_7,
RTW_HE_RU26_8,
RTW_HE_RU26_9,
/* 20MHz - 2 */
RTW_HE_RU26_10,
RTW_HE_RU26_11,
RTW_HE_RU26_12,
RTW_HE_RU26_13,
RTW_HE_RU26_14,
RTW_HE_RU26_15,
RTW_HE_RU26_16,
RTW_HE_RU26_17,
RTW_HE_RU26_18,
/* Center 26-tone */
RTW_HE_RU26_19,
/* 20MHz - 3 */
RTW_HE_RU26_20,
RTW_HE_RU26_21,
RTW_HE_RU26_22,
RTW_HE_RU26_23,
RTW_HE_RU26_24,
RTW_HE_RU26_25,
RTW_HE_RU26_26,
RTW_HE_RU26_27,
RTW_HE_RU26_28,
/* 20MHz - 4 */
RTW_HE_RU26_29,
RTW_HE_RU26_30,
RTW_HE_RU26_31,
RTW_HE_RU26_32,
RTW_HE_RU26_33,
RTW_HE_RU26_34,
RTW_HE_RU26_35,
RTW_HE_RU26_36,
RTW_HE_RU26_37 = 36,
/* 20MHz - 1 */
RTW_HE_RU52_1 = 37,
RTW_HE_RU52_2,
RTW_HE_RU52_3,
RTW_HE_RU52_4,
/* 20MHz - 2 */
RTW_HE_RU52_5,
RTW_HE_RU52_6,
RTW_HE_RU52_7,
RTW_HE_RU52_8,
/* 20MHz - 3 */
RTW_HE_RU52_9,
RTW_HE_RU52_10,
RTW_HE_RU52_11,
RTW_HE_RU52_12,
/* 20MHz - 4 */
RTW_HE_RU52_13,
RTW_HE_RU52_14,
RTW_HE_RU52_15,
RTW_HE_RU52_16 = 52,
/* 20MHz - 1 */
RTW_HE_RU106_1 = 53,
RTW_HE_RU106_2,
/* 20MHz - 2 */
RTW_HE_RU106_3,
RTW_HE_RU106_4,
/* 20MHz - 3 */
RTW_HE_RU106_5,
RTW_HE_RU106_6,
/* 20MHz - 4 */
RTW_HE_RU106_7,
RTW_HE_RU106_8 = 60,
/* 20MHz */
RTW_HE_RU242_1 = 61,
RTW_HE_RU242_2,
RTW_HE_RU242_3,
RTW_HE_RU242_4 = 64,
/* 40MHz */
RTW_HE_RU484_1 = 65,
RTW_HE_RU484_2 = 66,
/* 80MHz */
RTW_HE_RU996_1 = 67,
/* 160MHz */
RTW_HE_RU2x996_1 = 68,
};
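/*
 * Worked example (added for illustration): reading the index table above,
 * RU52 indices 37..52 are grouped four per 20 MHz segment, so
 * RTW_HE_RU52_5 (index 41) is the first 52-tone RU of the second 20 MHz
 * segment, and RTW_HE_RU106_8 (index 60) is the last 106-tone RU of the
 * fourth 20 MHz segment.
 */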
enum rtw_protect_mode {
RTW_PROTECT_DISABLE = 0,
RTW_PROTECT_RTS = 1,
RTW_PROTECT_CTS2SELF = 2,
RTW_PROTECT_HW_RTS = 3
};
enum rtw_ac {
RTW_AC_BE = 0,
RTW_AC_BK = 1,
RTW_AC_VI = 2,
RTW_AC_VO = 3
};
enum rtw_edcca_mode {
RTW_EDCCA_NORMAL,
RTW_EDCCA_ETSI,
RTW_EDCCA_JP,
RTW_EDCCA_MAX
};
enum rtw_mac_pwr_st {
RTW_MAC_PWR_NONE = 0,
RTW_MAC_PWR_OFF = 1,
RTW_MAC_PWR_ON = 2,
RTW_MAC_PWR_LPS = 3,
RTW_MAC_PWR_MAX = 0x4
};
enum rtw_pcie_bus_func_cap_t {
RTW_PCIE_BUS_FUNC_DISABLE = 0,
RTW_PCIE_BUS_FUNC_ENABLE = 1,
RTW_PCIE_BUS_FUNC_DEFAULT = 2,
RTW_PCIE_BUS_FUNC_IGNORE = 3
};
/* follows mac's definition, mac_ax_sw_io_mode */
enum rtw_gpio_mode {
RTW_AX_SW_IO_MODE_INPUT,
RTW_AX_SW_IO_MODE_OUTPUT_OD,
RTW_AX_SW_IO_MODE_OUTPUT_PP,
RTW_AX_SW_IO_MODE_MAX
};
/*MAC_AX_PCIE_L0SDLY_IGNORE = 0xFF, MAC_AX_PCIE_L1DLY_IGNORE = 0xFF, MAC_AX_PCIE_CLKDLY_IGNORE = 0xFF */
#define RTW_PCIE_BUS_ASPM_DLY_IGNORE 0xFF /* Fully controlled by HW */
#define RTW_FRAME_TYPE_MGNT 0
#define RTW_FRAME_TYPE_CTRL 1
#define RTW_FRAME_TYPE_DATA 2
#define RTW_FRAME_TYPE_EXT_RSVD 3
/* Association Related PKT Type + SubType */
#define FRAME_OFFSET_FRAME_CONTROL 0
#define FRAME_OFFSET_DURATION 2
#define FRAME_OFFSET_ADDRESS1 4
#define FRAME_OFFSET_ADDRESS2 10
#define FRAME_OFFSET_ADDRESS3 16
#define FRAME_OFFSET_SEQUENCE 22
#define FRAME_OFFSET_ADDRESS4 24
#define PHL_GET_80211_HDR_TYPE(_hdr) LE_BITS_TO_2BYTE((u8 *)_hdr, 2, 6)
#define PHL_GET_80211_HDR_MORE_FRAG(_hdr) LE_BITS_TO_2BYTE((u8 *)_hdr, 10, 1)
#define PHL_GET_80211_HDR_RETRY(_hdr) LE_BITS_TO_2BYTE((u8 *)_hdr, 11, 1)
#define PHL_GET_80211_HDR_FRAG_NUM(_hdr) LE_BITS_TO_2BYTE((u8 *)_hdr + 22, 0, 4)
#define PHL_GET_80211_HDR_SEQUENCE(_hdr) LE_BITS_TO_2BYTE((u8 *)_hdr + 22, 4, 12)
#define PHL_GET_80211_HDR_ADDRESS2(_d, _hdr, _val) \
_os_mem_cpy(_d, (u8 *)_val, (u8 *)_hdr + FRAME_OFFSET_ADDRESS2, 6)
#define PHL_GET_80211_HDR_ADDRESS3(_d, _hdr, _val) \
_os_mem_cpy(_d, (u8 *)_val, (u8 *)_hdr + FRAME_OFFSET_ADDRESS3, 6)
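/*
 * Illustrative usage of the accessors above (added as a sketch, not part of
 * the original header), assuming `hdr` points at a received 802.11 MAC
 * header and `d` is the drv_priv handle expected by _os_mem_cpy():
 *
 *	u16 seq = PHL_GET_80211_HDR_SEQUENCE(hdr);
 *	u8 ta[MAC_ADDRESS_LENGTH];
 *	PHL_GET_80211_HDR_ADDRESS2(d, hdr, ta);
 *	if (PHL_GET_80211_HDR_MORE_FRAG(hdr))
 *		PHL_DBG("seq %u: more fragments follow\n", seq);
 */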
#define RTW_FRAME_TYPE_BEACON 32
#define RTW_FRAME_TYPE_PROBE_RESP 20
#define RTW_FRAME_TYPE_ASOC_REQ 0
#define RTW_FRAME_TYPE_ASOC_RESP 4
#define RTW_FRAME_TYPE_REASOC_REQ 8
#define RTW_FRAME_TYPE_REASOC_RESP 12
#define RTW_IS_ASOC_PKT(_TYPE) \
((_TYPE == RTW_FRAME_TYPE_REASOC_RESP) || \
(_TYPE == RTW_FRAME_TYPE_REASOC_REQ) || \
(_TYPE == RTW_FRAME_TYPE_ASOC_RESP) || \
(_TYPE == RTW_FRAME_TYPE_ASOC_REQ)) ? true : false
#define RTW_IS_ASOC_REQ_PKT(_TYPE) \
((_TYPE == RTW_FRAME_TYPE_REASOC_REQ) || \
(_TYPE == RTW_FRAME_TYPE_ASOC_REQ)) ? true : false
#define RTW_IS_BEACON_OR_PROBE_RESP_PKT(_TYPE) \
((_TYPE == RTW_FRAME_TYPE_BEACON) || \
(_TYPE == RTW_FRAME_TYPE_PROBE_RESP)) ? true : false
#define TU 1024 /* Time Unit (TU): 1024 us*/
#define RTW_MAX_ETH_PKT_LEN 1536
#endif /*_RTW_GENERAL_DEF_H_*/
| 2301_81045437/rtl8852be | phl/rtw_general_def.h | C | agpl-3.0 | 16,446 |
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _CMD_DISP_TEST_C_
#include "../phl_headers.h"
#include "../phl_api.h"
#ifdef CONFIG_PHL_TEST_SUITE
#ifdef ENABLE_CMD_DISP_TEST
struct test_case_init {
u8 init_cnt;
u8 start_cnt;
};
struct test_case_msg {
u32 msg_cnt[2];
u8 pending[2];
u8 forcefail;
};
struct test_case_cmd {
u32 token_cnt[2];
u32 cmd_msg[2];
u8 abort[2];
};
struct test_case_prio_msg {
u32 msg_idx;
};
struct test_case_cannot_io {
bool cannot_io;
u32 last_evt;
};
struct test_case_self_def_seq {
u32 seq;
u32 cnt;
};
struct cmd_disp_test_ctx {
struct test_obj_ctrl_interface intf;
struct phl_info_t* phl;
u8 is_test_end;
u8 test_case;
u8 is_pass;
u8 thread_mode;
char rsn[32];
struct phl_bk_module_ops ops;
struct phl_cmd_token_req req;
struct test_case_init case_init;
struct test_case_msg case_msg;
struct test_case_cmd case_cmd;
struct test_case_prio_msg case_prio_msg;
struct test_case_cannot_io case_cannot_io;
struct test_case_self_def_seq case_self_def_seq;
};
struct cmd_disp_test_ctx disp_test_ctx;
enum CMD_DISP_TEST_CASE {
DISP_TEST_INIT = 1,
DISP_TEST_SEND_MSG = 2,
DISP_TEST_CMD_TOKEN_REQ = 3,
DISP_TEST_CANNOT_IO = 4,
DISP_TEST_PRIORITIZE_MSG = 5,
DISP_TEST_SELF_DEF_SEQ = 6
};
char err_rsn[][32] = {
"init eng fail", /*0*/
"register mdl fail", /*1*/
"init mdl fail", /*2*/
"init mdl count err", /*3*/
"start eng fail", /*4*/
"start mdl count err", /*5*/
"stop eng fail", /*6*/
"stop mdl count err", /*7*/
"deinit mdl count err", /*8*/
"msg mdl id chk err", /*9*/
"send msg err", /*10*/
"msg cnt chk err", /*11*/
"msg fail hdl err", /*12*/
"add token err", /*13*/
"acquire token err", /*14*/
"free token chk err", /*15*/
"free token err", /*16*/
"cmd order err", /*17*/
"cancel cmd fail", /*18*/
"pending msg fail", /*19*/
"hook next req fail", /*20*/
"msg order err", /*21*/
"msg status indicator err", /*22*/
"msg completion not received", /*23*/
"set dispr attr fail", /*24*/
"self def seq not match", /*25*/
"none"};
enum phl_mdl_ret_code test_req_acquired(void* dispr, void* priv)
{
u8 idx = 0;
void *d = phl_to_drvpriv(disp_test_ctx.phl);
struct phl_msg msg;
struct phl_msg_attribute attr;
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
if(disp_test_ctx.test_case == DISP_TEST_CMD_TOKEN_REQ) {
if(phl_dispr_get_idx(dispr, &idx) != RTW_PHL_STATUS_SUCCESS)
return MDL_RET_FAIL;
disp_test_ctx.case_cmd.token_cnt[idx]++;
//wait for 2nd cmd req to send msg
if(disp_test_ctx.case_cmd.token_cnt[idx] == 2) {
_os_mem_set(d, &msg, 0, sizeof(struct phl_msg));
_os_mem_set(d, &attr, 0, sizeof(struct phl_msg_attribute));
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_FG_MDL_START + 1);
SET_MSG_EVT_ID_FIELD(msg.msg_id, 125);
msg.band_idx = idx;
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
disp_test_ctx.case_cmd.cmd_msg[idx]++;
}
}
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_req_abort(void* dispr, void* priv)
{
u8 idx = 0;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
if(disp_test_ctx.test_case == DISP_TEST_CMD_TOKEN_REQ) {
if(phl_dispr_get_idx(dispr, &idx) != RTW_PHL_STATUS_SUCCESS)
return MDL_RET_FAIL;
disp_test_ctx.case_cmd.abort[idx] = true;
attr.opt = MSG_OPT_SEND_IN_ABORT;
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PHY_MGNT);
SET_MSG_EVT_ID_FIELD(msg.msg_id, 0x123);
msg.band_idx = idx;
phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
}
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_req_msg_hdlr(void* dispr, void* priv, struct phl_msg* msg)
{
u8 idx = 0;
if(disp_test_ctx.test_case == DISP_TEST_CMD_TOKEN_REQ) {
if(phl_dispr_get_idx(dispr, &idx) != RTW_PHL_STATUS_SUCCESS)
return MDL_RET_FAIL;
if( MSG_EVT_ID_FIELD(msg->msg_id) == 125)
disp_test_ctx.case_cmd.cmd_msg[idx]++;
}
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_req_set_info(void* dispr, void* priv,
struct phl_module_op_info* info)
{
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_req_query_info(void* dispr, void* priv,
struct phl_module_op_info* info)
{
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_mdl_init(void* phl_info, void* dispr, void** priv)
{
if(disp_test_ctx.test_case == DISP_TEST_INIT)
disp_test_ctx.case_init.init_cnt++;
return MDL_RET_SUCCESS;
}
void test_mdl_deinit(void* dispr, void* priv)
{
if(disp_test_ctx.test_case == DISP_TEST_INIT)
disp_test_ctx.case_init.init_cnt--;
return;
}
enum phl_mdl_ret_code test_mdl_start(void* dispr, void* priv)
{
if(disp_test_ctx.test_case == DISP_TEST_INIT)
disp_test_ctx.case_init.start_cnt++;
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_mdl_stop(void* dispr, void* priv)
{
if(disp_test_ctx.test_case == DISP_TEST_INIT)
disp_test_ctx.case_init.start_cnt--;
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_mdl_msg_hdlr(void* dispr, void* priv, struct phl_msg* msg)
{
u8 idx = 0;
void *d = phl_to_drvpriv(disp_test_ctx.phl);
if(disp_test_ctx.test_case == DISP_TEST_SEND_MSG &&
phl_dispr_get_idx(dispr, &idx) == RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.case_msg.msg_cnt[idx]++;
if(MSG_EVT_ID_FIELD(msg->msg_id) == 4 && disp_test_ctx.case_msg.pending[idx] == false) {
disp_test_ctx.case_msg.pending[idx] = true;
return MDL_RET_PENDING; /* reschedule msg*/
}
if(MSG_EVT_ID_FIELD(msg->msg_id) == 15) {
if(!IS_MSG_FAIL(msg->msg_id)) {
disp_test_ctx.case_msg.forcefail = true;
return MDL_RET_FAIL; /* force msg fail*/
}
else {
disp_test_ctx.case_msg.forcefail = false;
}
}
if(MSG_EVT_ID_FIELD(msg->msg_id) == 10) {
_os_sleep_ms(d, 100);
}
}
if(disp_test_ctx.test_case == DISP_TEST_CMD_TOKEN_REQ) {
if(phl_dispr_get_idx(dispr, &idx) != RTW_PHL_STATUS_SUCCESS)
return MDL_RET_FAIL;
if(MSG_EVT_ID_FIELD(msg->msg_id) == 125 &&
disp_test_ctx.case_cmd.cmd_msg[idx] != 1)
disp_test_ctx.case_cmd.cmd_msg[idx]++;
if(MSG_EVT_ID_FIELD(msg->msg_id) == 0x123) {
SET_MSG_EVT_ID_FIELD(msg->msg_id, 0x124);
return MDL_RET_PENDING;
}
}
if (disp_test_ctx.test_case == DISP_TEST_PRIORITIZE_MSG) {
if(MSG_EVT_ID_FIELD(msg->msg_id) == 1 && IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
disp_test_ctx.case_prio_msg.msg_idx = 1;
_os_sleep_ms(d, 100);
}
if (disp_test_ctx.case_prio_msg.msg_idx + 1 == MSG_EVT_ID_FIELD(msg->msg_id))
disp_test_ctx.case_prio_msg.msg_idx = MSG_EVT_ID_FIELD(msg->msg_id);
}
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_mdl_set_info(void* dispr, void* priv,
struct phl_module_op_info* info)
{
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_mdl_query_info(void* dispr, void* priv,
struct phl_module_op_info* info)
{
return MDL_RET_SUCCESS;
}
enum phl_mdl_ret_code test_btc_mdl_msg_hdlr(void* dispr, void* priv, struct phl_msg* msg)
{
if(disp_test_ctx.test_case == DISP_TEST_SELF_DEF_SEQ) {
disp_test_ctx.case_self_def_seq.cnt++;
if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
if (disp_test_ctx.case_self_def_seq.seq == 1)
disp_test_ctx.case_self_def_seq.seq = 2;
}
else {
if (disp_test_ctx.case_self_def_seq.seq == 6)
disp_test_ctx.case_self_def_seq.seq = 7;
}
}
return MDL_RET_SUCCESS;
}
/************************************************************************/
enum phl_mdl_ret_code test_gen_mdl_msg_hdlr(void* dispr, void* priv, struct phl_msg* msg)
{
if(disp_test_ctx.test_case == DISP_TEST_SELF_DEF_SEQ) {
disp_test_ctx.case_self_def_seq.cnt++;
if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
if (disp_test_ctx.case_self_def_seq.seq == 2)
disp_test_ctx.case_self_def_seq.seq = 3;
}
else {
if (disp_test_ctx.case_self_def_seq.seq == 4)
disp_test_ctx.case_self_def_seq.seq = 5;
}
}
return MDL_RET_SUCCESS;
}
/************************************************************************/
enum phl_mdl_ret_code test_mcc_mdl_msg_hdlr(void* dispr, void* priv, struct phl_msg* msg)
{
if(disp_test_ctx.test_case == DISP_TEST_SELF_DEF_SEQ) {
disp_test_ctx.case_self_def_seq.cnt++;
if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
if (disp_test_ctx.case_self_def_seq.seq == 3)
disp_test_ctx.case_self_def_seq.seq = 4;
}
else {
if (disp_test_ctx.case_self_def_seq.seq == 5)
disp_test_ctx.case_self_def_seq.seq = 6;
}
}
return MDL_RET_SUCCESS;
}
/************************************************************************/
enum phl_mdl_ret_code test_ser_mdl_msg_hdlr(void* dispr, void* priv, struct phl_msg* msg)
{
void *d = phl_to_drvpriv(disp_test_ctx.phl);
if(disp_test_ctx.test_case == DISP_TEST_SELF_DEF_SEQ) {
disp_test_ctx.case_self_def_seq.cnt++;
if (IS_MSG_IN_PRE_PHASE(msg->msg_id)) {
if (disp_test_ctx.case_self_def_seq.seq == 0)
disp_test_ctx.case_self_def_seq.seq = 1;
if (disp_test_ctx.case_self_def_seq.seq == 8)
_os_delay_ms(d, 100);
}
else {
if (disp_test_ctx.case_self_def_seq.seq == 7)
disp_test_ctx.case_self_def_seq.seq = 8;
}
}
return MDL_RET_SUCCESS;
}
void disp_init_test( void )
{
u8 i = 0;
u8 mdl_cnt = 10;
void *d = phl_to_drvpriv(disp_test_ctx.phl);
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_cmd_dispatch_engine *disp_eng = NULL;
u32 offset[] = {PHL_BK_MDL_ROLE_START, PHL_BK_MDL_OPT_START, PHL_BK_MDL_MDRY_START};
u32 idx = 0;
disp_test_ctx.is_pass = true;
for(i = 0; i < 5; i++) {
status = phl_disp_eng_init(disp_test_ctx.phl, 3);
if( (!i && status != RTW_PHL_STATUS_SUCCESS) ||
(i && status == RTW_PHL_STATUS_SUCCESS)) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[0], sizeof(err_rsn[0]));
goto test_err;
}
}
disp_eng = &(disp_test_ctx.phl->disp_eng);
for( i = 0 ; i < mdl_cnt; i++) {
idx = i % PHL_MDL_PRI_MAX;
status = dispr_register_module(disp_eng->dispatcher[2],
i+offset[idx],
&(disp_test_ctx.ops));
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_err;
}
}
status = phl_disp_eng_start(disp_test_ctx.phl);
if(status != RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[4], sizeof(err_rsn[4]));
goto test_err;
}
if(disp_test_ctx.case_init.start_cnt != mdl_cnt) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[5], sizeof(err_rsn[5]));
goto test_err;
}
status = phl_disp_eng_stop(disp_test_ctx.phl);
if(status != RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[6], sizeof(err_rsn[6]));
goto test_err;
}
if(disp_test_ctx.case_init.start_cnt) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[7], sizeof(err_rsn[7]));
goto test_err;
}
status = phl_disp_eng_deinit(disp_test_ctx.phl);
if( status != RTW_PHL_STATUS_SUCCESS)
disp_test_ctx.is_pass = false;
if(disp_test_ctx.case_init.init_cnt) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[8], sizeof(err_rsn[8]));
}
goto test_end;
test_err:
status = phl_disp_eng_deinit(disp_test_ctx.phl);
test_end:
disp_test_ctx.is_test_end = true;
}
void disp_send_msg_test( void )
{
u32 i = 0;
struct phl_msg msg;
struct phl_msg_attribute attr;
u8 mdl_cnt = 10;
u8 phy_num = 2;
void *d = phl_to_drvpriv(disp_test_ctx.phl);
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_cmd_dispatch_engine *disp_eng = NULL;
u8 man_num = 0;
u8 opt_num = 0;
u8 role_num = 0;
u8 total_num = 0;
u32 msg_hdl = 0;
u32 offset[] = {PHL_BK_MDL_ROLE_START, PHL_BK_MDL_OPT_START, PHL_BK_MDL_MDRY_START};
u32 idx = 0;
u32 tmp = 0;
disp_test_ctx.is_pass = true;
status = phl_disp_eng_init(disp_test_ctx.phl, phy_num);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[0], sizeof(err_rsn[0]));
goto test_end;
}
disp_eng = &(disp_test_ctx.phl->disp_eng);
status = phl_disp_eng_start(disp_test_ctx.phl);
if(status != RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[4], sizeof(err_rsn[4]));
goto test_end;
}
_os_mem_set(d, &msg, 0, sizeof(struct phl_msg));
_os_mem_set(d, &attr, 0, sizeof(struct phl_msg_attribute));
/* check recycle flow */
for( i = 0 ; i < 100; i++) {
if( !(i % 10 ))
SET_MSG_MDL_ID_FIELD(msg.msg_id, 10);
else
SET_MSG_MDL_ID_FIELD(msg.msg_id, 0);
SET_MSG_EVT_ID_FIELD(msg.msg_id, i);
msg.band_idx = 0;
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
if((!(i %10) && status == RTW_PHL_STATUS_SUCCESS) ||
((i %10) && status == RTW_PHL_STATUS_FAILURE)) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[9], sizeof(err_rsn[9]));
goto test_end;
}
msg.band_idx = 1;
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
if((!(i %10) && status == RTW_PHL_STATUS_SUCCESS) ||
((i %10) && status == RTW_PHL_STATUS_FAILURE)) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[9], sizeof(err_rsn[9]));
goto test_end;
}
}
_os_sleep_ms(d, 100);
/* check dispatch flow */
for( i = 0 ; i < mdl_cnt; i++) {
/* num of wifi role modules: 4 (10, 13, 16, 19)
 * num of optional modules: 3 (72, 75, 78)
 * num of mandatory modules: 3 (43, 46, 49)
 * modules at mandatory & wifi-role priority receive every msg;
 * modules at optional priority only receive a msg when their bit is
 * set in the notify bitmap passed to phl_disp_eng_send_msg
 */
man_num = 3;
opt_num = 3;
role_num = 4;
idx = i % PHL_MDL_PRI_MAX;
status = dispr_register_module(disp_eng->dispatcher[0],
i+offset[idx],
&(disp_test_ctx.ops));
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_end;
}
status = dispr_register_module(disp_eng->dispatcher[1],
i+offset[idx],
&(disp_test_ctx.ops));
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_end;
}
}
for( i = 0 ; i < mdl_cnt; i++) {
idx = i % PHL_MDL_PRI_MAX;
SET_MSG_MDL_ID_FIELD(msg.msg_id, i+offset[idx]);
SET_MSG_EVT_ID_FIELD(msg.msg_id, i);
if(i == 3)
tmp = 72;
else if( i == 6)
tmp = 78;
else
tmp = 0;
attr.notify.id_arr = (u8*)&tmp;
attr.notify.len = 1;
attr.opt |= MSG_OPT_SKIP_NOTIFY_OPT_MDL;
msg.band_idx = 0;
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[10], sizeof(err_rsn[10]));
goto test_end;
}
msg.band_idx = 1;
phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
}
attr.opt = 0;
attr.notify.id_arr = NULL;
attr.notify.len = 0;
_os_sleep_ms(d, 100);
total_num = 2* (((man_num+role_num) * mdl_cnt) + 3 + 2) + 1;
if(disp_test_ctx.case_msg.msg_cnt[0] == total_num ||
disp_test_ctx.case_msg.msg_cnt[1] == total_num) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[11], sizeof(err_rsn[11]));
goto test_end;
}
phl_disp_eng_clr_pending_msg(disp_test_ctx.phl, 0);
phl_disp_eng_clr_pending_msg(disp_test_ctx.phl, 1);
/*
 * we send 10 msgs above & expect the msg cnt to be as below:
 * total = 2 * ((man_num + role_num) * mdl_cnt + 3 + 2) + 1
 * the factor of 2 comes from the pre_phase & post_phase handling
 * 3: number of optional modules
 * 2: when module id = {13, 16}, we indicate one optional module to notify
 * 1: module id 43 is entered an extra time because of the reschedule
 */
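/*
 * Worked number for the values used in this test (added for clarity):
 * man_num = 3, role_num = 4, mdl_cnt = 10, so
 * total_num = 2 * ((3 + 4) * 10 + 3 + 2) + 1 = 151 per band.
 */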
_os_sleep_ms(d, 100);
disp_test_ctx.case_msg.pending[0] = false;
disp_test_ctx.case_msg.pending[1] = false;
total_num = 2* (((man_num+role_num) * mdl_cnt) + 3 + 2) + 1;
if(disp_test_ctx.case_msg.msg_cnt[0] != total_num ||
disp_test_ctx.case_msg.msg_cnt[1] != total_num) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[11], sizeof(err_rsn[11]));
goto test_end;
}
SET_MSG_MDL_ID_FIELD(msg.msg_id, 78);
SET_MSG_EVT_ID_FIELD(msg.msg_id, 15);
msg.band_idx = 0;
phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
_os_sleep_ms(d, 10);
if(disp_test_ctx.case_msg.forcefail == true) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[11], sizeof(err_rsn[11]));
goto test_end;
}
SET_MSG_MDL_ID_FIELD(msg.msg_id, 78);
SET_MSG_EVT_ID_FIELD(msg.msg_id, 10);
phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, &msg_hdl);
_os_sleep_ms(d, 100);
phl_disp_eng_cancel_msg(disp_test_ctx.phl, 0, &msg_hdl);
test_end:
status = phl_disp_eng_stop(disp_test_ctx.phl);
status = phl_disp_eng_deinit(disp_test_ctx.phl);
disp_test_ctx.is_test_end = true;
}
void disp_cmd_token_test( void )
{
u32 i = 0;
u32 j = 0;
u8 phy_num = 2;
void *d = phl_to_drvpriv(disp_test_ctx.phl);
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_cmd_dispatch_engine *disp_eng = NULL;
u32 req_hdl[2][10] = {0};
disp_test_ctx.is_pass = true;
status = phl_disp_eng_init(disp_test_ctx.phl, phy_num);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[0], sizeof(err_rsn[0]));
goto test_end;
}
disp_eng = &(disp_test_ctx.phl->disp_eng);
status = phl_disp_eng_start(disp_test_ctx.phl);
if(status != RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[4], sizeof(err_rsn[4]));
goto test_end;
}
for( i = 0 ; i < phy_num ; i++) {
status = dispr_register_module(disp_eng->dispatcher[i],
PHL_BK_MDL_MDRY_START,
&(disp_test_ctx.ops));
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_end;
}
for( j = 0 ; j < 10 ; j++) {
disp_test_ctx.req.module_id = (u8)(PHL_FG_MDL_START + j);
status = phl_disp_eng_add_token_req(disp_test_ctx.phl, (u8)i, &(disp_test_ctx.req), &(req_hdl[i][j]));
_os_sleep_ms(d, 1);
if( (j == 0 && status != RTW_PHL_STATUS_SUCCESS ) ||
( j && j < 8 && status != RTW_PHL_STATUS_PENDING) ||
(j >= 8 && status != RTW_PHL_STATUS_RESOURCE)) {
PHL_INFO("[DISP_TEST] idx: %d status:%d\n", j, status);
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[13], sizeof(err_rsn[13]));
goto test_end;
}
}
}
_os_sleep_ms(d, 100);
if(disp_test_ctx.case_cmd.token_cnt[0] != 1||
disp_test_ctx.case_cmd.token_cnt[1] != 1) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[14], sizeof(err_rsn[14]));
goto test_end;
}
for(i = 0 ; i < phy_num; i++) {
status = phl_disp_eng_free_token(disp_test_ctx.phl, (u8)i, &(req_hdl[i][1]));
if(status == RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[15], sizeof(err_rsn[15]));
goto test_end;
}
status = phl_disp_eng_free_token(disp_test_ctx.phl, (u8)i, &(req_hdl[i][0]));
if(status != RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[16], sizeof(err_rsn[16]));
goto test_end;
}
}
_os_sleep_ms(d, 100);
if(disp_test_ctx.case_cmd.cmd_msg[0] != 2 ||
disp_test_ctx.case_cmd.cmd_msg[1] != 2) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[17], sizeof(err_rsn[17]));
goto test_end;
}
for(i = 0 ; i < phy_num; i++) {
disp_test_ctx.case_cmd.abort[i] = false;
phl_disp_eng_cancel_token_req(disp_test_ctx.phl, (u8)i, &(req_hdl[i][1]));
_os_sleep_ms(d, 100);
if(disp_test_ctx.case_cmd.abort[i] == false ||
disp_test_ctx.case_cmd.token_cnt[i] == 3) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[18], sizeof(err_rsn[18]));
goto test_end;
}
phl_disp_eng_clr_pending_msg(disp_test_ctx.phl, (u8)i);
_os_sleep_ms(d, 100);
if( disp_test_ctx.case_cmd.token_cnt[i] != 3) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[20], sizeof(err_rsn[20]));
goto test_end;
}
}
test_end:
status = phl_disp_eng_stop(disp_test_ctx.phl);
status = phl_disp_eng_deinit(disp_test_ctx.phl);
disp_test_ctx.is_test_end = true;
}
void disp_prioritize_msg_test( void )
{
u32 i = 0;
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
u8 ctrl_msg_cnt = 4;
u8 msg_cnt = 10;
u8 ctrl_msg_start = 2;
u8 msg_start = ctrl_msg_start + ctrl_msg_cnt;
u8 phy_num = 1;
void *d = phl_to_drvpriv(disp_test_ctx.phl);
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_cmd_dispatch_engine *disp_eng = NULL;
disp_test_ctx.is_pass = true;
status = phl_disp_eng_init(disp_test_ctx.phl, phy_num);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[0], sizeof(err_rsn[0]));
goto test_end;
}
disp_eng = &(disp_test_ctx.phl->disp_eng);
status = phl_disp_eng_start(disp_test_ctx.phl);
if(status != RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[4], sizeof(err_rsn[4]));
goto test_end;
}
/* check dispatch flow */
status = dispr_register_module(disp_eng->dispatcher[0],
PHL_MDL_GENERAL,
&(disp_test_ctx.ops));
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_end;
}
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_GENERAL);
SET_MSG_EVT_ID_FIELD(msg.msg_id, 1);
msg.band_idx = 0;
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[10], sizeof(err_rsn[10]));
goto test_end;
}
_os_sleep_ms(d, 10);
for( i = 0 ; i < (u32) (msg_cnt + ctrl_msg_cnt); i++) {
if (i % 3) {
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_GENERAL);
SET_MSG_EVT_ID_FIELD(msg.msg_id, msg_start);
msg_start++;
}
else {
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PHY_MGNT);
SET_MSG_EVT_ID_FIELD(msg.msg_id, ctrl_msg_start);
ctrl_msg_start++;
}
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
}
_os_sleep_ms(d, 200);
PHL_INFO("[DISP_TEST] msg_idx:0x%x\n",disp_test_ctx.case_prio_msg.msg_idx);
if(disp_test_ctx.case_prio_msg.msg_idx != (u32) (msg_start - 1)) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[21], sizeof(err_rsn[21]));
goto test_end;
}
test_end:
status = phl_disp_eng_stop(disp_test_ctx.phl);
status = phl_disp_eng_deinit(disp_test_ctx.phl);
disp_test_ctx.is_test_end = true;
}
void _msg_completion(void* priv, struct phl_msg* msg)
{
void *d = phl_to_drvpriv(disp_test_ctx.phl);
PHL_INFO("[DISP_TEST] msg_id:0x%x\n", msg->msg_id);
if (MSG_EVT_ID_FIELD(msg->msg_id) == 10) {
if ( (!IS_MSG_CANCEL(msg->msg_id) || !IS_MSG_CANNOT_IO(msg->msg_id))) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[22], sizeof(err_rsn[22]));
}
}
else {
disp_test_ctx.case_cannot_io.last_evt = MSG_EVT_ID_FIELD(msg->msg_id);
if (disp_test_ctx.case_cannot_io.cannot_io == true) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[19], sizeof(err_rsn[19]));
}
else if (IS_MSG_CANCEL(msg->msg_id) || IS_MSG_FAIL(msg->msg_id) || IS_MSG_CANNOT_IO(msg->msg_id)) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[22], sizeof(err_rsn[22]));
}
}
}
void disp_cannot_io_test( void )
{
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
u8 phy_num = 1;
void *d = phl_to_drvpriv(disp_test_ctx.phl);
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_cmd_dispatch_engine *disp_eng = NULL;
disp_test_ctx.is_pass = true;
status = phl_disp_eng_init(disp_test_ctx.phl, phy_num);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[0], sizeof(err_rsn[0]));
goto test_end;
}
disp_eng = &(disp_test_ctx.phl->disp_eng);
status = phl_disp_eng_start(disp_test_ctx.phl);
if(status != RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[4], sizeof(err_rsn[4]));
goto test_end;
}
disp_test_ctx.case_cannot_io.cannot_io = true;
phl_disp_eng_notify_dev_io_status(disp_test_ctx.phl, HW_BAND_MAX, 0, false);
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PHY_MGNT);
SET_MSG_EVT_ID_FIELD(msg.msg_id, 10);
msg.band_idx = 0;
attr.completion.completion = _msg_completion;
attr.completion.priv = &disp_test_ctx;
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[10], sizeof(err_rsn[10]));
goto test_end;
}
_os_sleep_ms(d, 100);
if (disp_test_ctx.is_pass == false)
goto test_end;
SET_MSG_EVT_ID_FIELD(msg.msg_id, 20);
attr.opt |= MSG_OPT_PENDING_DURING_CANNOT_IO;
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[10], sizeof(err_rsn[10]));
goto test_end;
}
_os_sleep_ms(d, 100);
if (disp_test_ctx.is_pass == false)
goto test_end;
disp_test_ctx.case_cannot_io.cannot_io = false;
phl_disp_eng_notify_dev_io_status(disp_test_ctx.phl, HW_BAND_MAX, 0, true);
_os_sleep_ms(d, 100);
if (disp_test_ctx.case_cannot_io.last_evt != 20) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[23], sizeof(err_rsn[23]));
}
test_end:
status = phl_disp_eng_stop(disp_test_ctx.phl);
status = phl_disp_eng_deinit(disp_test_ctx.phl);
disp_test_ctx.is_test_end = true;
}
void disp_self_def_msg_test( void )
{
u32 i = 0;
struct phl_msg msg;
struct phl_msg_attribute attr;
u8 phy_num = 2;
void *d = phl_to_drvpriv(disp_test_ctx.phl);
enum rtw_phl_status status = RTW_PHL_STATUS_SUCCESS;
struct phl_cmd_dispatch_engine *disp_eng = NULL;
struct msg_self_def_seq seq = {0};
u8 pre_proct_seq[] = {PHL_MDL_BTC, PHL_MDL_GENERAL, PHL_MDL_MRC, PHL_MDL_SER, PHL_MDL_POWER_MGNT};
u8 post_proct_seq[] = {PHL_MDL_GENERAL, PHL_MDL_MRC, PHL_MDL_SER, PHL_MDL_POWER_MGNT};
disp_test_ctx.is_pass = true;
status = phl_disp_eng_init(disp_test_ctx.phl, phy_num);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[0], sizeof(err_rsn[0]));
goto test_end;
}
disp_eng = &(disp_test_ctx.phl->disp_eng);
status = phl_disp_eng_start(disp_test_ctx.phl);
if(status != RTW_PHL_STATUS_SUCCESS ) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[4], sizeof(err_rsn[4]));
goto test_end;
}
disp_test_ctx.ops.msg_hdlr = test_btc_mdl_msg_hdlr;
if(dispr_register_module(disp_eng->dispatcher[0], PHL_MDL_BTC, &(disp_test_ctx.ops)) != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_end;
}
disp_test_ctx.ops.msg_hdlr = test_gen_mdl_msg_hdlr;
if(dispr_register_module(disp_eng->dispatcher[0], PHL_MDL_GENERAL, &(disp_test_ctx.ops)) != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_end;
}
disp_test_ctx.ops.msg_hdlr = test_mcc_mdl_msg_hdlr;
if(dispr_register_module(disp_eng->dispatcher[0], PHL_MDL_MRC, &(disp_test_ctx.ops)) != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_end;
}
disp_test_ctx.ops.msg_hdlr = test_ser_mdl_msg_hdlr;
if(dispr_register_module(disp_eng->dispatcher[0], PHL_MDL_SER, &(disp_test_ctx.ops)) != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[1], sizeof(err_rsn[1]));
goto test_end;
}
disp_test_ctx.ops.msg_hdlr = test_mdl_msg_hdlr;
_os_mem_set(d, &msg, 0, sizeof(struct phl_msg));
_os_mem_set(d, &attr, 0, sizeof(struct phl_msg_attribute));
seq.pre_prot_phase.map[PHL_MDL_PRI_OPTIONAL].len = sizeof(pre_proct_seq);
seq.pre_prot_phase.map[PHL_MDL_PRI_OPTIONAL].id_arr = pre_proct_seq;
seq.pre_prot_phase.map[PHL_MDL_PRI_MANDATORY].len = sizeof(pre_proct_seq);
seq.pre_prot_phase.map[PHL_MDL_PRI_MANDATORY].id_arr = pre_proct_seq;
seq.post_prot_phase.map[PHL_MDL_PRI_OPTIONAL].len = sizeof(post_proct_seq);
seq.post_prot_phase.map[PHL_MDL_PRI_OPTIONAL].id_arr = post_proct_seq;
seq.post_prot_phase.map[PHL_MDL_PRI_MANDATORY].len = sizeof(post_proct_seq);
seq.post_prot_phase.map[PHL_MDL_PRI_MANDATORY].id_arr = post_proct_seq;
for (i = 0 ; i < 3; i++) {
status = phl_disp_eng_set_msg_disp_seq(disp_test_ctx.phl, &attr, &seq);
if(status != RTW_PHL_STATUS_SUCCESS || attr.dispr_attr == NULL) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[24], sizeof(err_rsn[24]));
goto test_end;
}
}
msg.band_idx = 0;
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_SER);
SET_MSG_EVT_ID_FIELD(msg.msg_id, 10);
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
if(status != RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[10], sizeof(err_rsn[10]));
goto test_end;
}
if (attr.dispr_attr != NULL) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[24], sizeof(err_rsn[24]));
goto test_end;
}
_os_sleep_ms(d, 100);
PHL_INFO("[DISP_TEST] seq:%d, cnt: %d\n",disp_test_ctx.case_self_def_seq.seq, disp_test_ctx.case_self_def_seq.cnt);
if(disp_test_ctx.case_self_def_seq.seq != 8 || disp_test_ctx.case_self_def_seq.cnt != 8) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[25], sizeof(err_rsn[25]));
goto test_end;
}
status = phl_disp_eng_set_msg_disp_seq(disp_test_ctx.phl, &attr, &seq);
if(status != RTW_PHL_STATUS_SUCCESS || attr.dispr_attr == NULL) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[24], sizeof(err_rsn[24]));
goto test_end;
}
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_POWER_MGNT);
SET_MSG_EVT_ID_FIELD(msg.msg_id, 10);
status = phl_disp_eng_send_msg(disp_test_ctx.phl, &msg, &attr, NULL);
if(status == RTW_PHL_STATUS_SUCCESS) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[10], sizeof(err_rsn[10]));
goto test_end;
}
if (attr.dispr_attr != NULL) {
disp_test_ctx.is_pass = false;
_os_mem_cpy(d, disp_test_ctx.rsn, err_rsn[24], sizeof(err_rsn[24]));
goto test_end;
}
test_end:
status = phl_disp_eng_stop(disp_test_ctx.phl);
status = phl_disp_eng_deinit(disp_test_ctx.phl);
disp_test_ctx.is_test_end = true;
}
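/*
 * The callbacks below implement the test_obj_ctrl_interface used by the
 * PHL test framework: breakpoint handling, failure-reason reporting,
 * end-of-test polling, pass/fail query and the per-case entry point.
 */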
u8 disp_test_bp_handler(void *priv, struct test_bp_info* bp_info)
{
return true;
}
u8 disp_test_fail_rsn(void *priv,char* rsn, u32 max_len)
{
void *d = phl_to_drvpriv(disp_test_ctx.phl);
_os_mem_cpy(d, rsn, disp_test_ctx.rsn, (_os_strlen((u8*)disp_test_ctx.rsn) > max_len)?(max_len):(_os_strlen((u8*)disp_test_ctx.rsn)));
return true;
}
u8 disp_test_is_test_end(void *priv)
{
return disp_test_ctx.is_test_end;
}
u8 disp_test_is_test_pass(void *priv)
{
PHL_INFO("[DISP_TEST] case(%s) pass(%d)\n", disp_test_ctx.test_case,
disp_test_ctx.is_pass);
return disp_test_ctx.is_pass;
}
u8 disp_test_start_test(void *priv)
{
PHL_INFO("[DISP_TEST] case(%s) start\n", disp_test_ctx.test_case);
switch(disp_test_ctx.test_case) {
case DISP_TEST_INIT:
/* verify items:
* 1: dispatch engine & dispatcher:
* init, start, stop, deinit
* 2: module ops:
* init, start, stop, deinit
* 3: double init
*/
disp_init_test();
break;
case DISP_TEST_SEND_MSG:
/* verify items:
* 1: msg send, cancel, recycle efficiency
* 2: msg process flow in background, including
* pre-phase, post phase, reschedule, fail handle
*/
disp_send_msg_test();
break;
case DISP_TEST_CMD_TOKEN_REQ:
/* verify items:
* 1: cmd send, cancel, recycle
* 2: cmd token process flow in token mgnt thread
* 3: cmd req ops verification
*/
disp_cmd_token_test();
break;
case DISP_TEST_CANNOT_IO:
disp_cannot_io_test();
break;
case DISP_TEST_PRIORITIZE_MSG:
disp_prioritize_msg_test();
break;
case DISP_TEST_SELF_DEF_SEQ:
disp_self_def_msg_test();
break;
default:
disp_test_ctx.is_test_end = true;
disp_test_ctx.is_pass = false;
break;
}
PHL_INFO("[DISP_TEST] case(%s) pass(%d)\n", disp_test_ctx.test_case,
disp_test_ctx.is_pass);
return true;
}
void phl_cmd_disp_test_start(void* phl_info, u8 test_case)
{
struct phl_info_t *phl = (struct phl_info_t *)phl_info;
void *d = phl_to_drvpriv(phl);
struct test_obj_ctrl_interface *intf = NULL;
_os_mem_set(d, &disp_test_ctx, 0, sizeof(struct cmd_disp_test_ctx));
disp_test_ctx.phl = phl;
disp_test_ctx.is_test_end = false;
disp_test_ctx.is_pass = false;
disp_test_ctx.test_case = test_case;
_os_mem_cpy(d, &(disp_test_ctx.rsn), "none", sizeof("none"));
intf = &(disp_test_ctx.intf);
/* test obj ops*/
intf->bp_handler = disp_test_bp_handler;
intf->get_fail_rsn = disp_test_fail_rsn;
intf->is_test_end = disp_test_is_test_end;
intf->is_test_pass = disp_test_is_test_pass;
intf->start_test = disp_test_start_test;
/* phl bk module ops*/
disp_test_ctx.ops.init = test_mdl_init;
disp_test_ctx.ops.deinit = test_mdl_deinit;
disp_test_ctx.ops.start = test_mdl_start;
disp_test_ctx.ops.stop = test_mdl_stop;
disp_test_ctx.ops.msg_hdlr = test_mdl_msg_hdlr;
disp_test_ctx.ops.set_info = test_mdl_set_info;
disp_test_ctx.ops.query_info = test_mdl_query_info;
/* cmd token req*/
disp_test_ctx.req.acquired = test_req_acquired;
disp_test_ctx.req.abort = test_req_abort;
disp_test_ctx.req.msg_hdlr = test_req_msg_hdlr;
disp_test_ctx.req.set_info = test_req_set_info;
disp_test_ctx.req.query_info = test_req_query_info;
rtw_phl_test_add_new_test_obj(phl->phl_com,
"phl_cmd_disp_test_init",
&disp_test_ctx,
TEST_LVL_LOW,
intf,
10000,
TEST_SUB_MODULE_TRX,
UNIT_TEST_MODE);
PHL_INFO("%s\n",__FUNCTION__);
}
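/*
 * Hedged usage sketch (illustrative only; the actual caller lives outside
 * this file): a debug/ioctl hook could launch one dispatcher test case and
 * then poll the registered test object, e.g.
 *
 *	phl_cmd_disp_test_start(phl_info, DISP_TEST_SEND_MSG);
 *
 * The valid case IDs are the DISP_TEST_* values handled in
 * disp_test_start_test() above.
 */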
#endif
#endif /* #ifdef CONFIG_PHL_TEST_SUITE */
|
2301_81045437/rtl8852be
|
phl/test/cmd_disp_test.c
|
C
|
agpl-3.0
| 35,084
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _CMD_DISP_TEST_H_
#define _CMD_DISP_TEST_H_
#ifdef CONFIG_PHL_TEST_SUITE
//#define ENABLE_CMD_DISP_TEST
#ifdef ENABLE_CMD_DISP_TEST
void phl_cmd_disp_test_start(void* phl_info, u8 test_case);
#else
#define phl_cmd_disp_test_start(_phl, _case)
#endif
#endif
#endif /* _CMD_DISP_TEST_H_ */
|
2301_81045437/rtl8852be
|
phl/test/cmd_disp_test.h
|
C
|
agpl-3.0
| 947
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_TEST_MP_C_
#include "../../phl_headers.h"
#include "phl_test_mp_def.h"
#include "phl_test_mp_api.h"
#include "../../hal_g6/test/mp/hal_test_mp_api.h"
#ifdef CONFIG_PHL_TEST_MP
void mp_notification_complete(void* priv, struct phl_msg* msg)
{
struct mp_context *mp_ctx = (struct mp_context *)priv;
if(msg->inbuf){
PHL_INFO("%s: Free info buf\n", __FUNCTION__);
_os_kmem_free(mp_ctx->phl_com->drv_priv, msg->inbuf, msg->inlen);
}
}
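/*
 * mp_cmd_done_notification() - post a MSG_EVT_MP_CMD_DONE message to the
 * msg hub. A two-byte buffer holding {mp_class, mp_cmd_id} is attached as
 * msg.inbuf and is freed either in mp_notification_complete() or, on send
 * failure, right here.
 */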
void mp_cmd_done_notification(struct mp_context *mp_ctx, enum mp_class mp_class,
u8 mp_cmd_id)
{
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
u8 *info = NULL;
info = _os_kmem_alloc(mp_ctx->phl_com->drv_priv, 2);
if(info == NULL){
PHL_ERR("%s: Allocate msg hub buffer fail!\n", __FUNCTION__);
return;
}
info[0] = mp_class;
info[1] = mp_cmd_id;
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_FUNC_MDL_TEST_MODULE);
SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_MP_CMD_DONE);
attr.completion.completion = mp_notification_complete;
attr.completion.priv = mp_ctx;
msg.inbuf = info;
msg.inlen = 2;
if (phl_msg_hub_send(mp_ctx->phl,
&attr, &msg) != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s: send msg_hub failed\n", __func__);
_os_kmem_free(mp_ctx->phl_com->drv_priv, info, 2);
}
}
/*
* @enum phl_msg_evt_id id: Assign different types of MP related msg event
* to pass buffer to another layer for further process
*/
void mp_buf_notification(struct mp_context *mp_ctx, void *buf, u32 buf_len,
enum phl_msg_evt_id id)
{
struct phl_msg msg = {0};
struct phl_msg_attribute attr = {0};
u8 *info = NULL;
info = _os_kmem_alloc(mp_ctx->phl_com->drv_priv, buf_len);
if(info == NULL){
PHL_ERR("%s: Allocate msg hub buffer fail!\n", __FUNCTION__);
return;
}
_os_mem_cpy(mp_ctx->phl_com->drv_priv, info, buf, buf_len);
SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_FUNC_MDL_TEST_MODULE);
SET_MSG_EVT_ID_FIELD(msg.msg_id, id);
attr.completion.completion = mp_notification_complete;
attr.completion.priv = mp_ctx;
msg.inbuf = info;
msg.inlen = buf_len;
if (phl_msg_hub_send(mp_ctx->phl, &attr, &msg) != RTW_PHL_STATUS_SUCCESS) {
PHL_ERR("%s: send msg_hub failed\n", __func__);
_os_kmem_free(mp_ctx->phl_com->drv_priv, info, buf_len);
}
}
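/*
 * mp_get_rpt_check() - sanity-check that the pending report matches the
 * class/cmd the caller is asking for. On mismatch the caller's buffer is
 * marked cmd_ok with RTW_PHL_STATUS_FAILURE and false is returned, so the
 * stored report is kept.
 */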
bool mp_get_rpt_check(struct mp_context *mp_ctx, void *rpt_buf)
{
bool ret = true;
struct mp_arg_hdr *rpt_hdr = (struct mp_arg_hdr *)mp_ctx->rpt;
struct mp_arg_hdr *rpt_buf_hdr = (struct mp_arg_hdr *)rpt_buf;
if((rpt_hdr->mp_class != rpt_buf_hdr->mp_class) ||
(rpt_hdr->cmd != rpt_buf_hdr->cmd)) {
PHL_WARN("%s: Report buffer not match!\n", __FUNCTION__);
rpt_buf_hdr->cmd_ok = true;
rpt_buf_hdr->status = RTW_PHL_STATUS_FAILURE;
ret = false;
}
return ret;
}
u8 mp_get_class_from_buf(struct mp_context *mp_ctx)
{
u8 *buf_tmp = NULL;
u8 mp_class = MP_CLASS_MAX;
if(mp_ctx && mp_ctx->buf) {
buf_tmp = (u8 *)mp_ctx->buf;
mp_class = buf_tmp[0];
}
return mp_class;
}
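/*
 * mp_bp_handler() - breakpoint callback from the test framework.
 * BP_INFO_TYPE_MP_CMD_EVENT wakes the MP command thread via mp_cmd_sema;
 * BP_INFO_TYPE_MP_RX_PHYSTS forwards the RX phy-status buffer to the
 * upper layer through mp_buf_notification().
 */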
u8 mp_bp_handler(void *priv, struct test_bp_info* bp_info)
{
struct mp_context *mp_ctx = (struct mp_context *)priv;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
PHL_DBG("%s: bp_info->type = %x\n", __FUNCTION__, bp_info->type);
switch(bp_info->type){
case BP_INFO_TYPE_MP_CMD_EVENT:
if(mp_ctx->status == MP_STATUS_WAIT_CMD) {
mp_ctx->status = MP_STATUS_CMD_EVENT;
_os_sema_up(mp_ctx->phl_com->drv_priv,&(mp_ctx->mp_cmd_sema));
phl_status = RTW_PHL_STATUS_SUCCESS;
}
break;
case BP_INFO_TYPE_MP_RX_PHYSTS:
if(mp_ctx->rx_physts == true) {
u32 i = 0;
for(i = 0; i < (bp_info->len/4); i ++)
PHL_DBG("0x%08X\n",
*((u32 *)(bp_info->ptr)+i));
mp_buf_notification(mp_ctx,
bp_info->ptr,
bp_info->len,
MSG_EVT_MP_RX_PHYSTS);
}
break;
case BP_INFO_TYPE_NONE:
case BP_INFO_TYPE_WAIT_BEACON_JOIN:
case BP_INFO_TYPE_SEND_AUTH_ODD:
case BP_INFO_TYPE_SEND_ASOC_REQ:
case BP_INFO_TYPE_SEND_DISASSOC:
case BP_INFO_TYPE_FILL_DISASSOC_RSN:
case BP_INFO_TYPE_SEND_PROBE_REQ:
case BP_INFO_TYPE_RX_TEST_WPRPT:
case BP_INFO_TYPE_RX_TEST_PATTERN:
case BP_INFO_TYPE_MAX:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "mp_bp_handler(): Unsupported case:%d, please check it\n",
bp_info->type);
break;
default:
PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "mp_bp_handler(): Unrecognized case:%d, please check it\n",
bp_info->type);
break;
}
return phl_status;
}
u8 mp_get_fail_rsn(void *priv,char* rsn, u32 max_len)
{
//struct mp_context *mp_ctx = (struct mp_context *)priv;
return true;
}
u8 mp_is_test_end(void *priv)
{
struct mp_context *mp_ctx = (struct mp_context *)priv;
return mp_ctx->is_mp_test_end;
}
u8 mp_is_test_pass(void *priv)
{
//struct mp_context *mp_ctx = (struct mp_context *)priv;
return true;
}
u8 mp_start(void *priv)
{
struct mp_context *mp_ctx = (struct mp_context *)priv;
struct rtw_phl_com_t* phl_com = mp_ctx->phl_com;
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
u8 mp_class = MP_CLASS_MAX;
FUNCIN();
while(!mp_is_test_end(mp_ctx)){
_os_sema_down(phl_com->drv_priv,&(mp_ctx->mp_cmd_sema));
if(mp_ctx->status == MP_STATUS_CMD_EVENT){
mp_ctx->status = MP_STATUS_RUN_CMD;
mp_class = mp_get_class_from_buf(mp_ctx);
/* Clear report buffer before executing next command */
if(mp_ctx->rpt != NULL) {
PHL_INFO("%s: Report not empty, cleanup!\n", __FUNCTION__);
_os_mem_free(phl_com->drv_priv, mp_ctx->rpt, mp_ctx->rpt_len);
mp_ctx->rpt = NULL;
mp_ctx->rpt_len = 0;
}
switch(mp_class){
case MP_CLASS_CONFIG:
PHL_INFO("%s: class = MP_CLASS_CONFIG\n", __FUNCTION__);
phl_status = mp_config(mp_ctx, (struct mp_config_arg *)mp_ctx->buf);
break;
case MP_CLASS_TX:
PHL_INFO("%s: class = MP_CLASS_TX\n", __FUNCTION__);
phl_status = mp_tx(mp_ctx, (struct mp_tx_arg *)mp_ctx->buf);
break;
case MP_CLASS_RX:
PHL_INFO("%s: class = MP_CLASS_RX\n", __FUNCTION__);
phl_status = mp_rx(mp_ctx, (struct mp_rx_arg *)mp_ctx->buf);
break;
case MP_CLASS_EFUSE:
PHL_INFO("%s: class = MP_CLASS_EFUSE\n", __FUNCTION__);
phl_status = mp_efuse(mp_ctx, (struct mp_efuse_arg *)mp_ctx->buf);
break;
case MP_CLASS_REG:
PHL_INFO("%s: class = MP_CLASS_REG\n", __FUNCTION__);
phl_status = mp_reg(mp_ctx, (struct mp_reg_arg *)mp_ctx->buf);
break;
case MP_CLASS_TXPWR:
PHL_INFO("%s: class = MP_CLASS_TXPWR\n", __FUNCTION__);
phl_status = mp_txpwr(mp_ctx, (struct mp_txpwr_arg *)mp_ctx->buf);
break;
case MP_CLASS_CAL:
PHL_INFO("%s: class = MP_CLASS_CAL\n", __FUNCTION__);
phl_status = mp_cal(mp_ctx, (struct mp_cal_arg *)mp_ctx->buf);
break;
default:
PHL_WARN("%s: Unknown mp class! (%d)\n", __FUNCTION__, mp_class);
break;
}
if(mp_ctx->rpt != NULL) {
struct mp_arg_hdr *hdr = (struct mp_arg_hdr *)mp_ctx->rpt;
mp_cmd_done_notification(mp_ctx, hdr->mp_class, hdr->cmd);
PHL_INFO("%s: Indication class(%d) cmd(%d)\n",
__FUNCTION__, hdr->mp_class, hdr->cmd);
}
/* Clear command buffer after executing the command */
if(mp_ctx->buf != NULL) {
PHL_INFO("%s: Command buf not empty, cleanup!\n", __FUNCTION__);
_os_mem_free(phl_com->drv_priv, mp_ctx->buf, mp_ctx->buf_len);
mp_ctx->buf = NULL;
mp_ctx->buf_len = 0;
}
mp_ctx->status = MP_STATUS_WAIT_CMD;
}
}
FUNCOUT();
return (u8)phl_status;
}
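/*
 * mp_change_mode() - switch the driver between MP and normal mode.
 * Updates drv_mode, optionally resets PHL (unless the
 * RTW_WKARD_MP_MODE_CHANGE workaround is enabled), reloads the BT efuse
 * shadow map and toggles CRC-error packet acceptance, then reapplies the
 * MP IC/HW settings.
 */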
void mp_change_mode(struct mp_context *mp_ctx, enum rtw_drv_mode driver_mode)
{
struct phl_info_t *phl_info = mp_ctx->phl;
PHL_INFO("%s Change to %x\n", __FUNCTION__, driver_mode);
/* Need PHL stop function later */
phl_info->phl_com->drv_mode = driver_mode;
#ifndef RTW_WKARD_MP_MODE_CHANGE
rtw_phl_reset(phl_info);
#endif
if(true == phl_is_mp_mode(phl_info->phl_com)) {
/* Load bt map to shadow map */
rtw_hal_mp_efuse_bt_shadow_reload(mp_ctx);
rtw_hal_acpt_crc_err_pkt(mp_ctx->hal,mp_ctx->cur_phy,true);
}
else {
rtw_hal_acpt_crc_err_pkt(mp_ctx->hal,mp_ctx->cur_phy,false);
}
#ifdef CONFIG_DBCC_SUPPORT
if (phl_info->phl_com->dev_cap.dbcc_sup)
rtw_hal_dbcc_cfg(mp_ctx->hal, phl_info->phl_com, true);
#endif
rtw_hal_mp_ic_hw_setting_init(mp_ctx);
rtw_hal_mp_cfg(phl_info->phl_com ,mp_ctx->hal);
rtw_hal_mp_ic_hw_setting_init(mp_ctx);
}
enum rtw_phl_status phl_test_mp_alloc(struct phl_info_t *phl_info, void *hal, void **mp)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
struct rtw_phl_com_t *phl_com = phl_info->phl_com;
struct mp_context *mp_ctx = NULL;
mp_ctx = _os_mem_alloc(phl_com->drv_priv, sizeof(struct mp_context));
if (mp_ctx == NULL) {
PHL_ERR("alloc mp_context failed\n");
phl_status = RTW_PHL_STATUS_RESOURCE;
goto exit;
}
_os_sema_init(phl_com->drv_priv,&(mp_ctx->mp_cmd_sema), 0);
mp_ctx->cur_phy = HW_PHY_0;
mp_ctx->rx_physts = false;
mp_ctx->phl = phl_info;
mp_ctx->phl_com = phl_com;
mp_ctx->hal = hal;
mp_ctx->status = MP_STATUS_INIT;
*mp = mp_ctx;
phl_status = RTW_PHL_STATUS_SUCCESS;
exit:
return phl_status;
}
void phl_test_mp_free(void **mp)
{
struct mp_context *mp_ctx = NULL;
if(*mp == NULL)
return;
mp_ctx = (struct mp_context *)(*mp);
_os_sema_free(mp_ctx->phl_com->drv_priv, &(mp_ctx->mp_cmd_sema));
_os_mem_free(mp_ctx->phl_com->drv_priv, mp_ctx, sizeof(struct mp_context));
mp_ctx = NULL;
*mp = NULL;
}
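/*
 * phl_test_mp_init() - register the MP context with the PHL test
 * framework as an integration-mode test object; mp_start() then runs as
 * the command loop until phl_test_mp_deinit() marks the test ended.
 */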
void phl_test_mp_init(void *mp)
{
struct mp_context *mp_ctx = NULL;
struct test_obj_ctrl_interface *pctrl = NULL;
if(mp == NULL)
return;
mp_ctx = (struct mp_context *)mp;
pctrl = &(mp_ctx->mp_test_ctrl);
mp_ctx->status = MP_STATUS_WAIT_CMD;
mp_ctx->is_mp_test_end = false;
pctrl->bp_handler = mp_bp_handler;
pctrl->get_fail_rsn = mp_get_fail_rsn;
pctrl->is_test_end = mp_is_test_end;
pctrl->is_test_pass = mp_is_test_pass;
pctrl->start_test = mp_start;
rtw_phl_test_add_new_test_obj(mp_ctx->phl_com,
"mp_test",
mp_ctx,
TEST_LVL_LOW,
pctrl,
-1,
TEST_SUB_MODULE_MP,
INTGR_TEST_MODE);
}
void phl_test_mp_deinit(void *mp)
{
struct mp_context *mp_ctx = NULL;
if(mp == NULL)
return;
mp_ctx = (struct mp_context *)mp;
if(mp_ctx->status < MP_STATUS_WAIT_CMD)
return;
mp_ctx->is_mp_test_end = true;
_os_sema_up(mp_ctx->phl_com->drv_priv,&(mp_ctx->mp_cmd_sema));
mp_ctx->status = MP_STATUS_INIT;
}
void phl_test_mp_start(void *mp, u8 tm_mode)
{
struct mp_context *mp_ctx = NULL;
if(mp == NULL)
return;
mp_ctx = (struct mp_context *)mp;
mp_change_mode(mp_ctx, tm_mode);
}
void phl_test_mp_stop(void *mp, u8 tm_mode)
{
struct mp_context *mp_ctx = NULL;
if(mp == NULL)
return;
mp_ctx = (struct mp_context *)mp;
if(mp_ctx->status < MP_STATUS_WAIT_CMD)
return;
mp_change_mode(mp_ctx, tm_mode);
}
void phl_test_mp_cmd_process(void *mp, void *buf, u32 buf_len, u8 submdid)
{
struct mp_context *mp_ctx = NULL;
struct rtw_phl_com_t *phl_com = NULL;
struct test_bp_info bp_info;
FUNCIN();
if(mp == NULL)
return;
mp_ctx = (struct mp_context *)mp;
phl_com = mp_ctx->phl_com;
if((buf == NULL) || (buf_len == 0)) {
PHL_ERR("%s: Invalid buffer content!\n", __func__);
return;
}
if(mp_ctx->status == MP_STATUS_WAIT_CMD) {
mp_ctx->buf_len = buf_len;
mp_ctx->buf = _os_mem_alloc(phl_com->drv_priv, buf_len);
if(mp_ctx->buf == NULL) {
PHL_ERR("%s: Allocate command buffer fail!\n", __FUNCTION__);
mp_ctx->buf_len = 0;
return;
}
_os_mem_cpy(phl_com->drv_priv, mp_ctx->buf, buf, buf_len);
_os_mem_set(phl_com->drv_priv, &bp_info, 0, sizeof(struct test_bp_info));
bp_info.type = BP_INFO_TYPE_MP_CMD_EVENT;
rtw_phl_test_setup_bp(phl_com, &bp_info, submdid);
}
else {
PHL_WARN("%s: Previous command is still running!\n", __FUNCTION__);
}
FUNCOUT();
}
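/*
 * phl_test_mp_get_rpt() - copy the pending report into the caller's
 * buffer. The report is only returned when no command is running, the
 * buffer is large enough and the class/cmd header matches; the internal
 * report buffer is then freed.
 */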
void phl_test_mp_get_rpt(void *mp, void *buf, u32 buf_len)
{
struct mp_context *mp_ctx = NULL;
FUNCIN();
if(mp == NULL) {
PHL_WARN("%s: mp is NULL!\n", __FUNCTION__);
goto exit;
}
mp_ctx = (struct mp_context *)mp;
if(mp_ctx->status != MP_STATUS_WAIT_CMD) {
PHL_WARN("%s: command is running!\n", __FUNCTION__);
goto exit;
}
if(mp_ctx->rpt == NULL) {
PHL_DBG("%s: mp_ctx->rpt is NULL!\n", __FUNCTION__);
goto exit;
}
if(buf_len < mp_ctx->rpt_len) {
PHL_WARN("%s: buffer not enough!\n", __FUNCTION__);
goto exit;
}
if(mp_get_rpt_check(mp_ctx, buf) == true) {
_os_mem_cpy(mp_ctx->phl_com->drv_priv, buf, mp_ctx->rpt, mp_ctx->rpt_len);
_os_mem_free(mp_ctx->phl_com->drv_priv, mp_ctx->rpt, mp_ctx->rpt_len);
mp_ctx->rpt = NULL;
mp_ctx->rpt_len = 0;
}
exit:
FUNCOUT();
}
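/*
 * Hedged lifecycle sketch (illustrative only; mp_mode, normal_mode,
 * cmd_buf/cmd_len and rpt_buf/rpt_len are placeholders, not names from
 * this driver): a test hook in the calling layer would typically drive
 * the entry points in this file in the following order.
 *
 *	void *mp = NULL;
 *	phl_test_mp_alloc(phl_info, hal, &mp);
 *	phl_test_mp_init(mp);
 *	phl_test_mp_start(mp, mp_mode);
 *	phl_test_mp_cmd_process(mp, cmd_buf, cmd_len, TEST_SUB_MODULE_MP);
 *	phl_test_mp_get_rpt(mp, rpt_buf, rpt_len);
 *	phl_test_mp_stop(mp, normal_mode);
 *	phl_test_mp_deinit(mp);
 *	phl_test_mp_free(&mp);
 */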
#endif /* CONFIG_PHL_TEST_MP */
|
2301_81045437/rtl8852be
|
phl/test/mp/phl_test_mp.c
|
C
|
agpl-3.0
| 13,206
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#ifndef _PHL_TEST_MP_API_H_
#define _PHL_TEST_MP_API_H_
#ifdef CONFIG_PHL_TEST_MP
enum rtw_phl_status mp_config(struct mp_context *mp, struct mp_config_arg *arg);
enum rtw_phl_status mp_tx(struct mp_context *mp, struct mp_tx_arg *arg);
enum rtw_phl_status mp_rx(struct mp_context *mp, struct mp_rx_arg *arg);
enum rtw_phl_status mp_reg(struct mp_context *mp, struct mp_reg_arg *arg);
enum rtw_phl_status mp_efuse(struct mp_context *mp, struct mp_efuse_arg *arg);
enum rtw_phl_status mp_txpwr(struct mp_context *mp, struct mp_txpwr_arg *arg);
enum rtw_phl_status mp_cal(struct mp_context *mp, struct mp_cal_arg *arg);
#endif /* CONFIG_PHL_TEST_MP */
#endif /* _PHL_TEST_MP_API_H_ */
|
2301_81045437/rtl8852be
|
phl/test/mp/phl_test_mp_api.h
|
C
|
agpl-3.0
| 1,333
|
/******************************************************************************
*
* Copyright(c) 2019 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*****************************************************************************/
#define _PHL_TEST_MP_CAL_C_
#include "../../phl_headers.h"
#include "phl_test_mp_def.h"
#include "../../hal_g6/test/mp/hal_test_mp_api.h"
#ifdef CONFIG_PHL_TEST_MP
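/*
 * Each phl_mp_cal_* helper below follows the same pattern: call the
 * corresponding rtw_hal_mp_* routine, record cmd_ok/status in the argument
 * block, then hand that block over as the report (mp->rpt/rpt_len) while
 * clearing mp->buf ownership so mp_start() does not free it twice.
 */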
static enum rtw_phl_status phl_mp_cal_trigger(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_cal_trigger(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_set_capability(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_cal_set_capability(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_get_capability(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_cal_get_capability(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
PHL_INFO("enable = %d. \n", arg->enable);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_get_tssi_de(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_cal_get_tssi_de(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_set_tssi_de(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_cal_set_tssi_de(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_get_txpwr_final_abs(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_cal_get_txpwr_final_abs(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_trigger_dpk_tracking(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_cal_trigger_dpk_tracking(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_set_tssi_avg(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_set_tssi_avg(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
/* PSD */
static enum rtw_phl_status phl_mp_cal_psd_init(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_psd_init(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_psd_restore(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_psd_restore(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_psd_get_point_data(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_psd_get_point_data(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
static enum rtw_phl_status phl_mp_cal_psd_query(
struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_hal_status hal_status = RTW_HAL_STATUS_FAILURE;
hal_status = rtw_hal_mp_psd_query(mp, arg);
PHL_INFO("%s: status = %d\n", __FUNCTION__, hal_status);
/* Record the result */
arg->cmd_ok = true;
arg->status = hal_status;
/* Transfer to report */
mp->rpt = arg;
mp->rpt_len = sizeof(struct mp_cal_arg);
mp->buf = NULL;
mp->buf_len = 0;
return RTW_PHL_STATUS_SUCCESS;
}
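/*
 * mp_cal() - dispatch an MP calibration command (MP_CAL_CMD_*) taken from
 * the command buffer to the matching phl_mp_cal_* helper above.
 */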
enum rtw_phl_status mp_cal(struct mp_context *mp, struct mp_cal_arg *arg)
{
enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
switch(arg->cmd){
case MP_CAL_CMD_TRIGGER_CAL:
PHL_INFO("%s: CMD = MP_CAL_CMD_TRIGGER_CAL\n",
__FUNCTION__);
phl_status = phl_mp_cal_trigger(mp, arg);
break;
case MP_CAL_CMD_SET_CAPABILITY_CAL:
PHL_INFO("%s: CMD = MP_CAL_CMD_SET_CAPABILITY_CAL\n",
__FUNCTION__);
phl_status = phl_mp_cal_set_capability(mp, arg);
break;
case MP_CAL_CMD_GET_CAPABILITY_CAL:
PHL_INFO("%s: CMD = MP_CAL_CMD_GET_CAPABILITY_CAL\n",
__FUNCTION__);
phl_status = phl_mp_cal_get_capability(mp, arg);
break;
case MP_CAL_CMD_GET_TSSI_DE_VALUE:
PHL_INFO("%s: CMD = MP_CAL_CMD_GET_TSSI_DE_VALUE\n",
__FUNCTION__);
phl_status = phl_mp_cal_get_tssi_de(mp, arg);
break;
case MP_CAL_CMD_SET_TSSI_DE_TX_VERIFY:
PHL_INFO("%s: CMD = MP_CAL_CMD_SET_TSST_DE_TX_VERIFY\n",
__FUNCTION__);
phl_status = phl_mp_cal_set_tssi_de(mp, arg);
break;
case MP_CAL_CMD_GET_TXPWR_FINAL_ABS:
PHL_INFO("%s: CMD = MP_CAL_CMD_GET_TXPWR_FINAL_ABS\n",
__FUNCTION__);
phl_status = phl_mp_cal_get_txpwr_final_abs(mp, arg);
break;
case MP_CAL_CMD_TRIGGER_DPK_TRACKING:
PHL_INFO("%s: CMD = MP_CAL_CMD_TRIGGER_DPK_TRACKING\n",
__FUNCTION__);
phl_status = phl_mp_cal_trigger_dpk_tracking(mp, arg);
break;
case MP_CAL_CMD_SET_TSSI_AVG:
PHL_INFO("%s: CMD = MP_CAL_CMD_SET_TSSI_AVG\n",
__FUNCTION__);
phl_status = phl_mp_cal_set_tssi_avg(mp, arg);
break;
case MP_CAL_CMD_PSD_INIT:
PHL_INFO("%s: CMD = MP_CAL_CMD_PSD_INIT\n",
__FUNCTION__);
phl_status = phl_mp_cal_psd_init(mp, arg);
break;
case MP_CAL_CMD_PSD_RESTORE:
PHL_INFO("%s: CMD = MP_CAL_CMD_PSD_RESTORE\n",
__FUNCTION__);
phl_status = phl_mp_cal_psd_restore(mp, arg);
break;
case MP_CAL_CMD_PSD_GET_POINT_DATA:
PHL_INFO("%s: CMD = MP_CAL_CMD_PSD_GET_POINT_DATA\n",
__FUNCTION__);
phl_status = phl_mp_cal_psd_get_point_data(mp, arg);
break;
case MP_CAL_CMD_PSD_QUERY:
PHL_INFO("%s: CMD = MP_CAL_CMD_PSD_QUERY\n",
__FUNCTION__);
phl_status = phl_mp_cal_psd_query(mp, arg);
break;
default:
PHL_WARN("%s: CMD NOT RECOGNIZED\n", __FUNCTION__);
break;
}
return phl_status;
}
#endif /* CONFIG_PHL_TEST_MP */
|
2301_81045437/rtl8852be
|
phl/test/mp/phl_test_mp_cal.c
|
C
|
agpl-3.0
| 9,146
|